=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-513442 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [e21ee73b-578f-48c9-826d-ab3b4bbb7871] Pending
helpers_test.go:352: "busybox" [e21ee73b-578f-48c9-826d-ab3b4bbb7871] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [e21ee73b-578f-48c9-826d-ab3b4bbb7871] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 10.003551417s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-513442 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
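The assertion that fails here is minikube's open-file-descriptor check: the test execs `ulimit -n` inside the busybox pod and expects the raised soft limit of 1048576, but the pod reports the stock 1024. A minimal standalone sketch of the same check, assuming kubectl is on PATH and the pod from testdata/busybox.yaml is still running (illustrative only, not the actual test code in start_stop_delete_test.go):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Same command the test runs: read the nofile soft limit inside the pod.
	out, err := exec.Command("kubectl", "--context", "old-k8s-version-513442",
		"exec", "busybox", "--", "/bin/sh", "-c", "ulimit -n").Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "exec failed:", err)
		os.Exit(1)
	}
	got := strings.TrimSpace(string(out))
	if got != "1048576" {
		// This branch reproduces the failure message seen above.
		fmt.Printf("'ulimit -n' returned %s, expected 1048576\n", got)
		os.Exit(1)
	}
	fmt.Println("ulimit -n =", got)
}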
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-513442
helpers_test.go:243: (dbg) docker inspect old-k8s-version-513442:
-- stdout --
[
{
"Id": "13426d2cf76c27dd9f2a390d750a5229384c014f5a7850e15adbf074b454afbc",
"Created": "2025-11-24T13:47:35.092444426Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 609088,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-24T13:47:35.135903717Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:133ca4ac39008d0056ad45d8cb70521d6b70d6e1b8bbff4678fd4b354efbdf70",
"ResolvConfPath": "/var/lib/docker/containers/13426d2cf76c27dd9f2a390d750a5229384c014f5a7850e15adbf074b454afbc/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/13426d2cf76c27dd9f2a390d750a5229384c014f5a7850e15adbf074b454afbc/hostname",
"HostsPath": "/var/lib/docker/containers/13426d2cf76c27dd9f2a390d750a5229384c014f5a7850e15adbf074b454afbc/hosts",
"LogPath": "/var/lib/docker/containers/13426d2cf76c27dd9f2a390d750a5229384c014f5a7850e15adbf074b454afbc/13426d2cf76c27dd9f2a390d750a5229384c014f5a7850e15adbf074b454afbc-json.log",
"Name": "/old-k8s-version-513442",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-513442:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-513442",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "13426d2cf76c27dd9f2a390d750a5229384c014f5a7850e15adbf074b454afbc",
"LowerDir": "/var/lib/docker/overlay2/bd85d41ae72067109a66add256d4bca169e9772c5d88f4cadf18fe98e5e00338-init/diff:/var/lib/docker/overlay2/0f013e03fd0eaee4efc608fb0376e7d3e8ba628388f5191310c2259ab273ad26/diff",
"MergedDir": "/var/lib/docker/overlay2/bd85d41ae72067109a66add256d4bca169e9772c5d88f4cadf18fe98e5e00338/merged",
"UpperDir": "/var/lib/docker/overlay2/bd85d41ae72067109a66add256d4bca169e9772c5d88f4cadf18fe98e5e00338/diff",
"WorkDir": "/var/lib/docker/overlay2/bd85d41ae72067109a66add256d4bca169e9772c5d88f4cadf18fe98e5e00338/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-513442",
"Source": "/var/lib/docker/volumes/old-k8s-version-513442/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-513442",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-513442",
"name.minikube.sigs.k8s.io": "old-k8s-version-513442",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "712b075dd23c6c1fbc5bbaa3b37767187ba4a40be8134789ce23d7e72a4abc25",
"SandboxKey": "/var/run/docker/netns/712b075dd23c",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33435"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33436"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33440"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33437"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33439"
}
]
},
"Networks": {
"old-k8s-version-513442": {
"IPAMConfig": {
"IPv4Address": "192.168.94.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "57f535f2d59b940a7e2130a9a6bcf664e3f052e878c97575bfeea5e13ed58e73",
"EndpointID": "439facefab95f9d1822733d1b1004570b6d417a88dc9a1ee26ae6d774889308f",
"Gateway": "192.168.94.1",
"IPAddress": "192.168.94.2",
"MacAddress": "46:21:b5:12:37:e7",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-513442",
"13426d2cf76c"
]
}
}
}
}
]
-- /stdout --
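Two details in the inspect dump above are worth flagging. HostConfig.Ulimits is empty, so the kic container inherits the Docker daemon's default limits, which is consistent with (though not by itself proof of) the pod seeing the stock 1024 nofile limit in the failed check. And HostConfig.PortBindings requests empty HostPort values, so Docker assigns ephemeral host ports; the actual assignments only appear under NetworkSettings.Ports (8443/tcp ended up on 127.0.0.1:33439). A small sketch, not minikube code, for reading such an assignment back with a Go template query:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Ports is a map of "port/proto" to a slice of bindings; take the
	// HostPort of the first binding for 8443/tcp, as seen above.
	out, err := exec.Command("docker", "inspect", "--format",
		`{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}`,
		"old-k8s-version-513442").Output()
	if err != nil {
		fmt.Println("inspect failed:", err)
		return
	}
	fmt.Println("apiserver published on 127.0.0.1:" + strings.TrimSpace(string(out)))
}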
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-513442 -n old-k8s-version-513442
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-513442 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-513442 logs -n 25: (1.2175157s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-355661 sudo cat /lib/systemd/system/containerd.service │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ ssh │ -p cilium-355661 sudo cat /etc/containerd/config.toml │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ ssh │ -p cilium-355661 sudo containerd config dump │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ ssh │ -p cilium-355661 sudo systemctl status crio --all --full --no-pager │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ ssh │ -p cilium-355661 sudo systemctl cat crio --no-pager │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ start │ -p NoKubernetes-787855 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ 24 Nov 25 13:46 UTC │
│ ssh │ -p cilium-355661 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ ssh │ -p cilium-355661 sudo crio config │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ delete │ -p cilium-355661 │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ 24 Nov 25 13:46 UTC │
│ start │ -p force-systemd-flag-775412 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-flag-775412 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ 24 Nov 25 13:47 UTC │
│ delete │ -p NoKubernetes-787855 │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ 24 Nov 25 13:46 UTC │
│ start │ -p NoKubernetes-787855 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ 24 Nov 25 13:47 UTC │
│ ssh │ force-systemd-flag-775412 ssh cat /etc/containerd/config.toml │ force-systemd-flag-775412 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ delete │ -p force-systemd-flag-775412 │ force-systemd-flag-775412 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ ssh │ -p NoKubernetes-787855 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ │
│ start │ -p cert-options-342221 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-342221 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ stop │ -p NoKubernetes-787855 │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ start │ -p NoKubernetes-787855 --driver=docker --container-runtime=containerd │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ ssh │ cert-options-342221 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-342221 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ ssh │ -p cert-options-342221 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-342221 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ delete │ -p cert-options-342221 │ cert-options-342221 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ start │ -p old-k8s-version-513442 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-513442 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:48 UTC │
│ ssh │ -p NoKubernetes-787855 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ │
│ delete │ -p NoKubernetes-787855 │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ start │ -p no-preload-608395 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-608395 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:48 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/24 13:47:35
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1124 13:47:35.072446 608917 out.go:360] Setting OutFile to fd 1 ...
I1124 13:47:35.072749 608917 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 13:47:35.072763 608917 out.go:374] Setting ErrFile to fd 2...
I1124 13:47:35.072768 608917 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 13:47:35.073046 608917 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21932-370498/.minikube/bin
I1124 13:47:35.073526 608917 out.go:368] Setting JSON to false
I1124 13:47:35.074857 608917 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":8994,"bootTime":1763983061,"procs":340,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1124 13:47:35.074959 608917 start.go:143] virtualization: kvm guest
I1124 13:47:35.077490 608917 out.go:179] * [no-preload-608395] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1124 13:47:35.079255 608917 out.go:179] - MINIKUBE_LOCATION=21932
I1124 13:47:35.079255 608917 notify.go:221] Checking for updates...
I1124 13:47:35.080776 608917 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1124 13:47:35.082396 608917 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21932-370498/kubeconfig
I1124 13:47:35.083932 608917 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21932-370498/.minikube
I1124 13:47:35.085251 608917 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1124 13:47:35.086603 608917 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1124 13:47:35.089427 608917 config.go:182] Loaded profile config "cert-expiration-099863": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 13:47:35.089575 608917 config.go:182] Loaded profile config "kubernetes-upgrade-358357": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 13:47:35.089706 608917 config.go:182] Loaded profile config "old-k8s-version-513442": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 13:47:35.089837 608917 driver.go:422] Setting default libvirt URI to qemu:///system
I1124 13:47:35.114581 608917 docker.go:124] docker version: linux-29.0.3:Docker Engine - Community
I1124 13:47:35.114769 608917 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 13:47:35.180508 608917 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:58 OomKillDisable:false NGoroutines:78 SystemTime:2025-11-24 13:47:35.169616068 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1124 13:47:35.180627 608917 docker.go:319] overlay module found
I1124 13:47:35.182258 608917 out.go:179] * Using the docker driver based on user configuration
I1124 13:47:35.183642 608917 start.go:309] selected driver: docker
I1124 13:47:35.183663 608917 start.go:927] validating driver "docker" against <nil>
I1124 13:47:35.183675 608917 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1124 13:47:35.184437 608917 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 13:47:35.249663 608917 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:58 OomKillDisable:false NGoroutines:78 SystemTime:2025-11-24 13:47:35.237755455 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1124 13:47:35.249975 608917 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1124 13:47:35.250402 608917 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 13:47:35.252318 608917 out.go:179] * Using Docker driver with root privileges
I1124 13:47:35.254354 608917 cni.go:84] Creating CNI manager for ""
I1124 13:47:35.254446 608917 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:47:35.254457 608917 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1124 13:47:35.254652 608917 start.go:353] cluster config:
{Name:no-preload-608395 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-608395 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 13:47:35.256201 608917 out.go:179] * Starting "no-preload-608395" primary control-plane node in "no-preload-608395" cluster
I1124 13:47:35.257392 608917 cache.go:134] Beginning downloading kic base image for docker with containerd
I1124 13:47:35.258857 608917 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1124 13:47:35.260330 608917 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1124 13:47:35.260404 608917 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1124 13:47:35.260496 608917 profile.go:143] Saving config to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/config.json ...
I1124 13:47:35.260537 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/config.json: {Name:mk2f4d5eff7070dcec35f39f30e01cd0b3fcce8c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:35.260546 608917 cache.go:107] acquiring lock: {Name:mk28ec677a69a6f418643b8b89331fa25b8c42f3 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260546 608917 cache.go:107] acquiring lock: {Name:mkad3cbb6fa2e7f41e4d7c0e1e3c74156dc55521 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260557 608917 cache.go:107] acquiring lock: {Name:mk7aef7fc4ff6e4e4541fdeb1d5e26c13a66856b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260584 608917 cache.go:107] acquiring lock: {Name:mk586ecbe7f4b4aab48f8ad28d0d7b1848898c9c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260604 608917 cache.go:107] acquiring lock: {Name:mkf548ea8c9721a4e4ad1e37073c3deea8530810 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260622 608917 cache.go:107] acquiring lock: {Name:mk1ce266bd6b9003a6a371facbc84809dce0c3c8 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260651 608917 cache.go:107] acquiring lock: {Name:mk687b2dcc146d43e9d607f472f2f08a2307baed Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260663 608917 cache.go:107] acquiring lock: {Name:mk4b559f0fdae6e96edea26981618bf8d9d50b2d Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260712 608917 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:35.260755 608917 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:35.260801 608917 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:35.260819 608917 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1124 13:47:35.260852 608917 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:35.260858 608917 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1124 13:47:35.260727 608917 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:35.261039 608917 cache.go:115] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1124 13:47:35.261050 608917 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 523.955µs
I1124 13:47:35.261069 608917 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1124 13:47:35.262249 608917 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1124 13:47:35.262277 608917 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:35.262359 608917 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:35.262407 608917 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1124 13:47:35.262461 608917 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:35.262522 608917 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:35.262735 608917 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:35.285963 608917 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1124 13:47:35.285989 608917 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1124 13:47:35.286014 608917 cache.go:240] Successfully downloaded all kic artifacts
I1124 13:47:35.286057 608917 start.go:360] acquireMachinesLock for no-preload-608395: {Name:mkc9d1cf0cec9be2b369f1e47c690fc0399e88e2 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.286191 608917 start.go:364] duration metric: took 102.178µs to acquireMachinesLock for "no-preload-608395"
I1124 13:47:35.286224 608917 start.go:93] Provisioning new machine with config: &{Name:no-preload-608395 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-608395 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 13:47:35.286330 608917 start.go:125] createHost starting for "" (driver="docker")
I1124 13:47:30.558317 607669 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 13:47:30.558626 607669 start.go:159] libmachine.API.Create for "old-k8s-version-513442" (driver="docker")
I1124 13:47:30.558656 607669 client.go:173] LocalClient.Create starting
I1124 13:47:30.558725 607669 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem
I1124 13:47:30.558754 607669 main.go:143] libmachine: Decoding PEM data...
I1124 13:47:30.558772 607669 main.go:143] libmachine: Parsing certificate...
I1124 13:47:30.558826 607669 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem
I1124 13:47:30.558849 607669 main.go:143] libmachine: Decoding PEM data...
I1124 13:47:30.558860 607669 main.go:143] libmachine: Parsing certificate...
I1124 13:47:30.559212 607669 cli_runner.go:164] Run: docker network inspect old-k8s-version-513442 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 13:47:30.577139 607669 cli_runner.go:211] docker network inspect old-k8s-version-513442 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 13:47:30.577245 607669 network_create.go:284] running [docker network inspect old-k8s-version-513442] to gather additional debugging logs...
I1124 13:47:30.577276 607669 cli_runner.go:164] Run: docker network inspect old-k8s-version-513442
W1124 13:47:30.593786 607669 cli_runner.go:211] docker network inspect old-k8s-version-513442 returned with exit code 1
I1124 13:47:30.593826 607669 network_create.go:287] error running [docker network inspect old-k8s-version-513442]: docker network inspect old-k8s-version-513442: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-513442 not found
I1124 13:47:30.593854 607669 network_create.go:289] output of [docker network inspect old-k8s-version-513442]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-513442 not found
** /stderr **
I1124 13:47:30.594026 607669 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:47:30.613315 607669 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-8afb578efdfa IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:7a:5e:46:43:aa:fe} reservation:<nil>}
I1124 13:47:30.614364 607669 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-ca3a55f53176 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ce:98:62:4c:91:8f} reservation:<nil>}
I1124 13:47:30.614827 607669 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-e11236ccf9ba IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:36:3b:80:be:95:34} reservation:<nil>}
I1124 13:47:30.615410 607669 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-35b7bf6fd97a IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:5a:12:4e:d4:19:26} reservation:<nil>}
I1124 13:47:30.616018 607669 network.go:211] skipping subnet 192.168.85.0/24 that is taken: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName:br-1f5932eecbe7 IfaceIPv4:192.168.85.1 IfaceMTU:1500 IfaceMAC:aa:ff:d3:cd:de:0f} reservation:<nil>}
I1124 13:47:30.617269 607669 network.go:206] using free private subnet 192.168.94.0/24: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e7fa00}
I1124 13:47:30.617308 607669 network_create.go:124] attempt to create docker network old-k8s-version-513442 192.168.94.0/24 with gateway 192.168.94.1 and MTU of 1500 ...
I1124 13:47:30.617398 607669 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.94.0/24 --gateway=192.168.94.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-513442 old-k8s-version-513442
I1124 13:47:30.671102 607669 network_create.go:108] docker network old-k8s-version-513442 192.168.94.0/24 created
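The subnet probing above shows the pattern plainly: candidates start at 192.168.49.0/24 and advance in steps of 9 (49, 58, 67, 76, 85), and the first /24 without an existing bridge wins, here 192.168.94.0/24. A simplified sketch of that walk; the real implementation lives in minikube's network.go, and the step size here is just read off this log:

package main

import "fmt"

func main() {
	// Third octets the log reports as taken by existing docker bridges.
	taken := map[int]bool{49: true, 58: true, 67: true, 76: true, 85: true}
	for third := 49; third <= 254; third += 9 {
		if !taken[third] {
			// Prints 192.168.94.0/24 given the bridges above.
			fmt.Printf("using free private subnet 192.168.%d.0/24\n", third)
			return
		}
	}
	fmt.Println("no free subnet found")
}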
I1124 13:47:30.671150 607669 kic.go:121] calculated static IP "192.168.94.2" for the "old-k8s-version-513442" container
I1124 13:47:30.671218 607669 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 13:47:30.689078 607669 cli_runner.go:164] Run: docker volume create old-k8s-version-513442 --label name.minikube.sigs.k8s.io=old-k8s-version-513442 --label created_by.minikube.sigs.k8s.io=true
I1124 13:47:30.709312 607669 oci.go:103] Successfully created a docker volume old-k8s-version-513442
I1124 13:47:30.709408 607669 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-513442-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-513442 --entrypoint /usr/bin/test -v old-k8s-version-513442:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1124 13:47:31.132905 607669 oci.go:107] Successfully prepared a docker volume old-k8s-version-513442
I1124 13:47:31.132980 607669 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 13:47:31.132992 607669 kic.go:194] Starting extracting preloaded images to volume ...
I1124 13:47:31.133075 607669 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21932-370498/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-513442:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1124 13:47:35.011677 607669 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21932-370498/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-513442:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (3.878547269s)
I1124 13:47:35.011716 607669 kic.go:203] duration metric: took 3.878721361s to extract preloaded images to volume ...
W1124 13:47:35.011796 607669 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1124 13:47:35.011829 607669 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1124 13:47:35.011871 607669 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 13:47:35.073961 607669 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-513442 --name old-k8s-version-513442 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-513442 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-513442 --network old-k8s-version-513442 --ip 192.168.94.2 --volume old-k8s-version-513442:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
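This docker run is what produced the inspect dump at the top of the post-mortem: --memory=3072mb surfaces as HostConfig.Memory=3221225472 (3072 MiB), the --publish=127.0.0.1:: flags become the empty-HostPort PortBindings, and the --security-opt/--tmpfs flags match the HostConfig fields one for one. A quick hypothetical cross-check that the memory limit landed:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// HostConfig.Memory is reported in bytes; 3072 MiB = 3221225472.
	out, err := exec.Command("docker", "inspect", "--format",
		"{{.HostConfig.Memory}}", "old-k8s-version-513442").Output()
	if err != nil {
		fmt.Println("inspect failed:", err)
		return
	}
	got := strings.TrimSpace(string(out))
	want := fmt.Sprint(3072 * 1024 * 1024)
	fmt.Printf("HostConfig.Memory=%s want=%s match=%v\n", got, want, got == want)
}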
I1124 13:47:32.801968 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:47:32.802485 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
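The `connection refused` lines here come from a third minikube process (PID 572647) interleaved into this log, polling a control plane at 192.168.76.2 while it restarts. The probe itself is just an HTTPS GET of /healthz; a hand-rolled sketch for a manual check, skipping certificate verification since this throwaway probe does not load the minikube CA:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 2 * time.Second,
		// The apiserver cert is signed by minikubeCA, which this quick
		// probe does not trust; skip verification for the manual check.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get("https://192.168.76.2:8443/healthz")
	if err != nil {
		fmt.Println("stopped:", err) // e.g. connect: connection refused
		return
	}
	defer resp.Body.Close()
	fmt.Println("healthz:", resp.Status)
}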
I1124 13:47:32.802542 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:47:32.802595 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:47:32.832902 572647 cri.go:89] found id: "6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:32.832956 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:32.832963 572647 cri.go:89] found id: ""
I1124 13:47:32.832972 572647 logs.go:282] 2 containers: [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:47:32.833038 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:32.837621 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:32.841927 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:47:32.842013 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:47:32.877193 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:32.877214 572647 cri.go:89] found id: ""
I1124 13:47:32.877223 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:47:32.877290 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:32.882239 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:47:32.882329 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:47:32.912677 572647 cri.go:89] found id: ""
I1124 13:47:32.912709 572647 logs.go:282] 0 containers: []
W1124 13:47:32.912727 572647 logs.go:284] No container was found matching "coredns"
I1124 13:47:32.912735 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:47:32.912799 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:47:32.942634 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:32.942656 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:32.942662 572647 cri.go:89] found id: ""
I1124 13:47:32.942672 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:47:32.942735 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:32.947427 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:32.951442 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:47:32.951519 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:47:32.982583 572647 cri.go:89] found id: ""
I1124 13:47:32.982614 572647 logs.go:282] 0 containers: []
W1124 13:47:32.982626 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:47:32.982635 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:47:32.982706 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:47:33.013412 572647 cri.go:89] found id: "daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:33.013432 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:33.013435 572647 cri.go:89] found id: ""
I1124 13:47:33.013444 572647 logs.go:282] 2 containers: [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:47:33.013492 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:33.017848 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:33.021955 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:47:33.022038 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:47:33.055691 572647 cri.go:89] found id: ""
I1124 13:47:33.055722 572647 logs.go:282] 0 containers: []
W1124 13:47:33.055733 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:47:33.055743 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:47:33.055822 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:47:33.086844 572647 cri.go:89] found id: ""
I1124 13:47:33.086868 572647 logs.go:282] 0 containers: []
W1124 13:47:33.086877 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:47:33.086887 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:47:33.086904 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:33.140737 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:47:33.140775 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:33.185221 572647 logs.go:123] Gathering logs for kube-controller-manager [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf] ...
I1124 13:47:33.185259 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:33.218642 572647 logs.go:123] Gathering logs for container status ...
I1124 13:47:33.218669 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:47:33.251506 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:47:33.251634 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:47:33.346627 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:47:33.346672 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:47:33.363530 572647 logs.go:123] Gathering logs for kube-apiserver [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8] ...
I1124 13:47:33.363571 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:33.400997 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:47:33.401042 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:33.446051 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:47:33.446088 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:33.484418 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:47:33.484465 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:47:33.537056 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:47:33.537093 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:47:33.611727 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:47:33.611762 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:47:33.611778 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:36.150015 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:47:36.150435 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:47:36.150499 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:47:36.150559 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:47:36.181496 572647 cri.go:89] found id: "6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:36.181524 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:36.181530 572647 cri.go:89] found id: ""
I1124 13:47:36.181541 572647 logs.go:282] 2 containers: [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:47:36.181626 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.186587 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.190995 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:47:36.191076 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:47:35.288531 608917 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 13:47:35.288826 608917 start.go:159] libmachine.API.Create for "no-preload-608395" (driver="docker")
I1124 13:47:35.288879 608917 client.go:173] LocalClient.Create starting
I1124 13:47:35.288981 608917 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem
I1124 13:47:35.289027 608917 main.go:143] libmachine: Decoding PEM data...
I1124 13:47:35.289053 608917 main.go:143] libmachine: Parsing certificate...
I1124 13:47:35.289129 608917 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem
I1124 13:47:35.289159 608917 main.go:143] libmachine: Decoding PEM data...
I1124 13:47:35.289172 608917 main.go:143] libmachine: Parsing certificate...
I1124 13:47:35.289667 608917 cli_runner.go:164] Run: docker network inspect no-preload-608395 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 13:47:35.309178 608917 cli_runner.go:211] docker network inspect no-preload-608395 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 13:47:35.309257 608917 network_create.go:284] running [docker network inspect no-preload-608395] to gather additional debugging logs...
I1124 13:47:35.309283 608917 cli_runner.go:164] Run: docker network inspect no-preload-608395
W1124 13:47:35.328323 608917 cli_runner.go:211] docker network inspect no-preload-608395 returned with exit code 1
I1124 13:47:35.328350 608917 network_create.go:287] error running [docker network inspect no-preload-608395]: docker network inspect no-preload-608395: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-608395 not found
I1124 13:47:35.328362 608917 network_create.go:289] output of [docker network inspect no-preload-608395]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-608395 not found
** /stderr **
I1124 13:47:35.328448 608917 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:47:35.351281 608917 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-8afb578efdfa IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:7a:5e:46:43:aa:fe} reservation:<nil>}
I1124 13:47:35.352105 608917 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-ca3a55f53176 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ce:98:62:4c:91:8f} reservation:<nil>}
I1124 13:47:35.352583 608917 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-e11236ccf9ba IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:36:3b:80:be:95:34} reservation:<nil>}
I1124 13:47:35.353066 608917 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-35b7bf6fd97a IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:5a:12:4e:d4:19:26} reservation:<nil>}
I1124 13:47:35.353566 608917 network.go:211] skipping subnet 192.168.85.0/24 that is taken: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName:br-1f5932eecbe7 IfaceIPv4:192.168.85.1 IfaceMTU:1500 IfaceMAC:aa:ff:d3:cd:de:0f} reservation:<nil>}
I1124 13:47:35.354145 608917 network.go:211] skipping subnet 192.168.94.0/24 that is taken: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 IsPrivate:true Interface:{IfaceName:br-57f535f2d59b IfaceIPv4:192.168.94.1 IfaceMTU:1500 IfaceMAC:6e:28:a9:1e:8a:96} reservation:<nil>}
I1124 13:47:35.354775 608917 network.go:206] using free private subnet 192.168.103.0/24: &{IP:192.168.103.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.103.0/24 Gateway:192.168.103.1 ClientMin:192.168.103.2 ClientMax:192.168.103.254 Broadcast:192.168.103.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001d86bc0}
I1124 13:47:35.354805 608917 network_create.go:124] attempt to create docker network no-preload-608395 192.168.103.0/24 with gateway 192.168.103.1 and MTU of 1500 ...
I1124 13:47:35.354861 608917 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.103.0/24 --gateway=192.168.103.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-608395 no-preload-608395
I1124 13:47:35.432539 608917 network_create.go:108] docker network no-preload-608395 192.168.103.0/24 created
I1124 13:47:35.432598 608917 kic.go:121] calculated static IP "192.168.103.2" for the "no-preload-608395" container
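The static IP follows directly from the subnet chosen above: .1 is the gateway, so the first container gets .2. A sketch of that derivation (naive /24-only arithmetic; the real selection in network.go also tracks reservations):

package main

import (
	"fmt"
	"net"
)

// firstClientIP returns the first usable address after the gateway,
// e.g. 192.168.103.2 for 192.168.103.0/24.
func firstClientIP(cidr string) (net.IP, error) {
	_, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		return nil, err
	}
	ip := ipnet.IP.To4()
	if ip == nil {
		return nil, fmt.Errorf("not IPv4: %s", cidr)
	}
	client := make(net.IP, len(ip))
	copy(client, ip)
	client[3] += 2 // .0 is the network, .1 the gateway, .2 the first client
	return client, nil
}

func main() {
	ip, err := firstClientIP("192.168.103.0/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip) // 192.168.103.2
}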
I1124 13:47:35.432695 608917 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 13:47:35.453593 608917 cli_runner.go:164] Run: docker volume create no-preload-608395 --label name.minikube.sigs.k8s.io=no-preload-608395 --label created_by.minikube.sigs.k8s.io=true
I1124 13:47:35.471825 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1124 13:47:35.475329 608917 oci.go:103] Successfully created a docker volume no-preload-608395
I1124 13:47:35.475418 608917 cli_runner.go:164] Run: docker run --rm --name no-preload-608395-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-608395 --entrypoint /usr/bin/test -v no-preload-608395:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1124 13:47:35.484374 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1124 13:47:35.522730 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1124 13:47:35.528813 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1124 13:47:35.529239 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1124 13:47:35.541677 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1124 13:47:35.561542 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1124 13:47:35.640840 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 exists
I1124 13:47:35.640868 608917 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1" took 380.250244ms
I1124 13:47:35.640883 608917 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 succeeded
I1124 13:47:35.985260 608917 oci.go:107] Successfully prepared a docker volume no-preload-608395
I1124 13:47:35.985319 608917 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
W1124 13:47:35.985414 608917 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1124 13:47:35.985453 608917 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1124 13:47:35.985506 608917 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 13:47:36.047047 608917 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-608395 --name no-preload-608395 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-608395 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-608395 --network no-preload-608395 --ip 192.168.103.2 --volume no-preload-608395:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1124 13:47:36.258467 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 exists
I1124 13:47:36.258503 608917 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.34.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1" took 997.955969ms
I1124 13:47:36.258519 608917 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.34.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 succeeded
I1124 13:47:36.410125 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Running}}
I1124 13:47:36.432289 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:47:36.453312 608917 cli_runner.go:164] Run: docker exec no-preload-608395 stat /var/lib/dpkg/alternatives/iptables
I1124 13:47:36.504193 608917 oci.go:144] the created container "no-preload-608395" has a running status.
I1124 13:47:36.504226 608917 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa...
I1124 13:47:36.604837 608917 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 13:47:36.631267 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:47:36.655799 608917 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 13:47:36.655830 608917 kic_runner.go:114] Args: [docker exec --privileged no-preload-608395 chown docker:docker /home/docker/.ssh/authorized_keys]
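The kic SSH setup above is two steps: generate a key pair on the host, then copy the public half into the container as /home/docker/.ssh/authorized_keys and chown it. A self-contained sketch of the key-generation half (paths and key size here are assumptions, not minikube's exact choices):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"os"

	"golang.org/x/crypto/ssh"
)

// writeKeyPair writes a PEM private key plus an authorized_keys-format
// public key, i.e. the "ssh-rsa AAAA..." line pushed into the container.
func writeKeyPair(privPath, pubPath string) error {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}
	privPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	if err := os.WriteFile(privPath, privPEM, 0600); err != nil {
		return err
	}
	pub, err := ssh.NewPublicKey(&key.PublicKey)
	if err != nil {
		return err
	}
	return os.WriteFile(pubPath, ssh.MarshalAuthorizedKey(pub), 0644)
}

func main() {
	if err := writeKeyPair("id_rsa", "id_rsa.pub"); err != nil {
		panic(err)
	}
}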
I1124 13:47:36.705661 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:47:36.729778 608917 machine.go:94] provisionDockerMachine start ...
I1124 13:47:36.729884 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:36.756901 608917 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:36.757367 608917 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33441 <nil> <nil>}
I1124 13:47:36.757380 608917 main.go:143] libmachine: About to run SSH command:
hostname
I1124 13:47:36.758446 608917 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
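The handshake EOF here is transient: the container is running but sshd inside it is not yet accepting connections, so the provisioner retries until the `hostname` command succeeds a few seconds later. A TCP-level sketch of that wait (the real code retries the full SSH handshake, not just the dial):

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForPort polls until something accepts on addr or the timeout hits.
func waitForPort(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", addr, time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		time.Sleep(250 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for %s", addr)
}

func main() {
	fmt.Println(waitForPort("127.0.0.1:33441", 30*time.Second))
}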
I1124 13:47:37.510037 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 exists
I1124 13:47:37.510068 608917 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.34.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1" took 2.249448579s
I1124 13:47:37.510081 608917 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.34.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 succeeded
I1124 13:47:37.572176 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 exists
I1124 13:47:37.572211 608917 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.34.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1" took 2.31168357s
I1124 13:47:37.572229 608917 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.34.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 succeeded
I1124 13:47:37.595833 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 exists
I1124 13:47:37.595868 608917 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.34.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1" took 2.335217312s
I1124 13:47:37.595886 608917 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.34.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 succeeded
I1124 13:47:37.719899 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 exists
I1124 13:47:37.719956 608917 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.12.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1" took 2.45935214s
I1124 13:47:37.719969 608917 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.12.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 succeeded
I1124 13:47:38.059972 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 exists
I1124 13:47:38.060022 608917 cache.go:96] cache image "registry.k8s.io/etcd:3.6.4-0" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0" took 2.799433794s
I1124 13:47:38.060036 608917 cache.go:80] save to tar file registry.k8s.io/etcd:3.6.4-0 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 succeeded
I1124 13:47:38.060055 608917 cache.go:87] Successfully saved all images to host disk.
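Each cache.go triple above ("exists" / "took" / "succeeded") is one pass of an exists-or-fetch check per image tarball. A minimal sketch of that pattern (fetch is a hypothetical stand-in for the real image download and save):

package main

import (
	"fmt"
	"os"
	"time"
)

// ensureCached reuses the on-disk tar when present, otherwise fetches
// and saves it, timing the operation like the log lines above.
func ensureCached(tarPath string, fetch func(string) error) error {
	start := time.Now()
	if _, err := os.Stat(tarPath); err == nil {
		fmt.Printf("%s exists, took %s\n", tarPath, time.Since(start))
		return nil
	}
	if err := fetch(tarPath); err != nil {
		return err
	}
	fmt.Printf("saved %s in %s\n", tarPath, time.Since(start))
	return nil
}

func main() {
	_ = ensureCached("/tmp/pause_3.10.1", func(string) error { return nil })
}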
I1124 13:47:39.915534 608917 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-608395
I1124 13:47:39.915567 608917 ubuntu.go:182] provisioning hostname "no-preload-608395"
I1124 13:47:39.915651 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:39.936421 608917 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:39.936658 608917 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33441 <nil> <nil>}
I1124 13:47:39.936672 608917 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-608395 && echo "no-preload-608395" | sudo tee /etc/hostname
I1124 13:47:35.415632 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Running}}
I1124 13:47:35.436407 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:47:35.457824 607669 cli_runner.go:164] Run: docker exec old-k8s-version-513442 stat /var/lib/dpkg/alternatives/iptables
I1124 13:47:35.505936 607669 oci.go:144] the created container "old-k8s-version-513442" has a running status.
I1124 13:47:35.505993 607669 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa...
I1124 13:47:35.536159 607669 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 13:47:35.565751 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:47:35.587350 607669 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 13:47:35.587376 607669 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-513442 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 13:47:35.639485 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:47:35.659275 607669 machine.go:94] provisionDockerMachine start ...
I1124 13:47:35.659377 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:35.682791 607669 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:35.683193 607669 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33435 <nil> <nil>}
I1124 13:47:35.683215 607669 main.go:143] libmachine: About to run SSH command:
hostname
I1124 13:47:35.683887 607669 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:57402->127.0.0.1:33435: read: connection reset by peer
I1124 13:47:38.829345 607669 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-513442
I1124 13:47:38.829376 607669 ubuntu.go:182] provisioning hostname "old-k8s-version-513442"
I1124 13:47:38.829451 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:38.847276 607669 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:38.847521 607669 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33435 <nil> <nil>}
I1124 13:47:38.847540 607669 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-513442 && echo "old-k8s-version-513442" | sudo tee /etc/hostname
I1124 13:47:39.005190 607669 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-513442
I1124 13:47:39.005277 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.023623 607669 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:39.023848 607669 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33435 <nil> <nil>}
I1124 13:47:39.023866 607669 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-513442' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-513442/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-513442' | sudo tee -a /etc/hosts;
fi
fi
I1124 13:47:39.170228 607669 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 13:47:39.170266 607669 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21932-370498/.minikube CaCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21932-370498/.minikube}
I1124 13:47:39.170286 607669 ubuntu.go:190] setting up certificates
I1124 13:47:39.170295 607669 provision.go:84] configureAuth start
I1124 13:47:39.170348 607669 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-513442
I1124 13:47:39.189446 607669 provision.go:143] copyHostCerts
I1124 13:47:39.189521 607669 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem, removing ...
I1124 13:47:39.189536 607669 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem
I1124 13:47:39.189619 607669 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem (1082 bytes)
I1124 13:47:39.189751 607669 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem, removing ...
I1124 13:47:39.189764 607669 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem
I1124 13:47:39.189810 607669 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem (1123 bytes)
I1124 13:47:39.189989 607669 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem, removing ...
I1124 13:47:39.190006 607669 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem
I1124 13:47:39.190054 607669 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem (1675 bytes)
I1124 13:47:39.190154 607669 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-513442 san=[127.0.0.1 192.168.94.2 localhost minikube old-k8s-version-513442]
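The SAN list in the line above ([127.0.0.1 192.168.94.2 localhost minikube old-k8s-version-513442]) maps onto the IPAddresses and DNSNames of an x509 template. A sketch of that cert generation (self-signed for brevity; minikube actually signs with the ca.pem/ca-key.pem pair it just read):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"math/big"
	"net"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.old-k8s-version-513442"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().AddDate(3, 0, 0),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// SANs from the log line above:
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.94.2")},
		DNSNames:    []string{"localhost", "minikube", "old-k8s-version-513442"},
	}
	if _, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key); err != nil {
		panic(err)
	}
}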
I1124 13:47:39.227079 607669 provision.go:177] copyRemoteCerts
I1124 13:47:39.227139 607669 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 13:47:39.227177 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.244951 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:47:39.349311 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1124 13:47:39.371319 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1124 13:47:39.391311 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1124 13:47:39.411071 607669 provision.go:87] duration metric: took 240.75737ms to configureAuth
I1124 13:47:39.411102 607669 ubuntu.go:206] setting minikube options for container-runtime
I1124 13:47:39.411303 607669 config.go:182] Loaded profile config "old-k8s-version-513442": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 13:47:39.411317 607669 machine.go:97] duration metric: took 3.752022568s to provisionDockerMachine
I1124 13:47:39.411325 607669 client.go:176] duration metric: took 8.852661553s to LocalClient.Create
I1124 13:47:39.411358 607669 start.go:167] duration metric: took 8.852720089s to libmachine.API.Create "old-k8s-version-513442"
I1124 13:47:39.411372 607669 start.go:293] postStartSetup for "old-k8s-version-513442" (driver="docker")
I1124 13:47:39.411388 607669 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 13:47:39.411452 607669 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 13:47:39.411508 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.429085 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:47:39.536320 607669 ssh_runner.go:195] Run: cat /etc/os-release
I1124 13:47:39.540367 607669 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 13:47:39.540402 607669 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 13:47:39.540414 607669 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-370498/.minikube/addons for local assets ...
I1124 13:47:39.540470 607669 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-370498/.minikube/files for local assets ...
I1124 13:47:39.540543 607669 filesync.go:149] local asset: /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem -> 3741222.pem in /etc/ssl/certs
I1124 13:47:39.540631 607669 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 13:47:39.549275 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem --> /etc/ssl/certs/3741222.pem (1708 bytes)
I1124 13:47:39.573695 607669 start.go:296] duration metric: took 162.301306ms for postStartSetup
I1124 13:47:39.574191 607669 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-513442
I1124 13:47:39.593438 607669 profile.go:143] Saving config to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/config.json ...
I1124 13:47:39.593801 607669 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 13:47:39.593897 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.615008 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:47:39.717288 607669 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 13:47:39.722340 607669 start.go:128] duration metric: took 9.166080327s to createHost
I1124 13:47:39.722370 607669 start.go:83] releasing machines lock for "old-k8s-version-513442", held for 9.166275546s
I1124 13:47:39.722447 607669 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-513442
I1124 13:47:39.743680 607669 ssh_runner.go:195] Run: cat /version.json
I1124 13:47:39.743731 607669 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 13:47:39.743745 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.743812 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.763336 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:47:39.763737 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:47:39.929805 607669 ssh_runner.go:195] Run: systemctl --version
I1124 13:47:39.938447 607669 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 13:47:39.944068 607669 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 13:47:39.944147 607669 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 13:47:39.974609 607669 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
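The find/mv one-liner above sidelines competing bridge CNI configs by renaming them with a .mk_disabled suffix. An in-process equivalent, as a sketch:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// disableBridgeCNI renames bridge/podman configs in dir so the runtime
// ignores them, matching the remote find -exec mv in the log.
func disableBridgeCNI(dir string) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		name := e.Name()
		if e.IsDir() || strings.HasSuffix(name, ".mk_disabled") {
			continue
		}
		if !strings.Contains(name, "bridge") && !strings.Contains(name, "podman") {
			continue
		}
		src := filepath.Join(dir, name)
		if err := os.Rename(src, src+".mk_disabled"); err != nil {
			return err
		}
		fmt.Println("disabled", src)
	}
	return nil
}

func main() {
	_ = disableBridgeCNI("/etc/cni/net.d")
}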
I1124 13:47:39.974641 607669 start.go:496] detecting cgroup driver to use...
I1124 13:47:39.974679 607669 detect.go:190] detected "systemd" cgroup driver on host os
I1124 13:47:39.974728 607669 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 13:47:39.990824 607669 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 13:47:40.004856 607669 docker.go:218] disabling cri-docker service (if available) ...
I1124 13:47:40.004920 607669 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 13:47:40.024248 607669 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 13:47:40.044433 607669 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 13:47:40.145638 607669 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 13:47:40.247759 607669 docker.go:234] disabling docker service ...
I1124 13:47:40.247829 607669 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 13:47:40.269922 607669 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 13:47:40.284840 607669 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 13:47:40.379978 607669 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 13:47:40.471616 607669 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 13:47:40.485207 607669 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 13:47:40.501980 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1124 13:47:40.513545 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 13:47:40.524134 607669 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1124 13:47:40.524215 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1124 13:47:40.533927 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:47:40.543474 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 13:47:40.553177 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:47:40.563129 607669 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 13:47:40.572813 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 13:47:40.583799 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 13:47:40.593872 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
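The run of sed commands above is a set of targeted line rewrites of /etc/containerd/config.toml (sandbox image, cgroup driver, runc v2 shim, CNI conf dir, unprivileged ports). The same edit in-process for one of them, forcing SystemdCgroup = true, as a sketch built on the sed pattern from the log:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	conf := "[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runc.options]\n  SystemdCgroup = false\n"
	// Same expression as the sed above: keep the leading indent, replace the value.
	re := regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)
	fmt.Print(re.ReplaceAllString(conf, "${1}SystemdCgroup = true"))
}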
I1124 13:47:40.604166 607669 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 13:47:40.612262 607669 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 13:47:40.620472 607669 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:47:40.706065 607669 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1124 13:47:40.809269 607669 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 13:47:40.809335 607669 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 13:47:40.814110 607669 start.go:564] Will wait 60s for crictl version
I1124 13:47:40.814187 607669 ssh_runner.go:195] Run: which crictl
I1124 13:47:40.818745 607669 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 13:47:40.843808 607669 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 13:47:40.843877 607669 ssh_runner.go:195] Run: containerd --version
I1124 13:47:40.865477 607669 ssh_runner.go:195] Run: containerd --version
I1124 13:47:40.893673 607669 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1124 13:47:36.234464 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:36.234492 572647 cri.go:89] found id: ""
I1124 13:47:36.234504 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:47:36.234584 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.240249 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:47:36.240335 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:47:36.279967 572647 cri.go:89] found id: ""
I1124 13:47:36.279998 572647 logs.go:282] 0 containers: []
W1124 13:47:36.280009 572647 logs.go:284] No container was found matching "coredns"
I1124 13:47:36.280027 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:47:36.280082 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:47:36.313257 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:36.313286 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:36.313292 572647 cri.go:89] found id: ""
I1124 13:47:36.313302 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:47:36.313364 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.317818 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.322103 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:47:36.322170 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:47:36.352450 572647 cri.go:89] found id: ""
I1124 13:47:36.352485 572647 logs.go:282] 0 containers: []
W1124 13:47:36.352497 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:47:36.352506 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:47:36.352569 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:47:36.381849 572647 cri.go:89] found id: "daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:36.381876 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:36.381881 572647 cri.go:89] found id: ""
I1124 13:47:36.381896 572647 logs.go:282] 2 containers: [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:47:36.381995 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.386540 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.391244 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:47:36.391326 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:47:36.425813 572647 cri.go:89] found id: ""
I1124 13:47:36.425845 572647 logs.go:282] 0 containers: []
W1124 13:47:36.425856 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:47:36.425864 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:47:36.425945 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:47:36.461097 572647 cri.go:89] found id: ""
I1124 13:47:36.461127 572647 logs.go:282] 0 containers: []
W1124 13:47:36.461139 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:47:36.461153 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:47:36.461172 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:36.499983 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:47:36.500029 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:47:36.521192 572647 logs.go:123] Gathering logs for kube-apiserver [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8] ...
I1124 13:47:36.521223 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:36.557807 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:47:36.557859 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:47:36.611092 572647 logs.go:123] Gathering logs for container status ...
I1124 13:47:36.611122 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:47:36.647506 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:47:36.647538 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:47:36.773107 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:47:36.773142 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:47:36.847612 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:47:36.847637 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:47:36.847662 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:36.887116 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:47:36.887154 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:36.924700 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:47:36.924746 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:36.974655 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:47:36.974689 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:37.017086 572647 logs.go:123] Gathering logs for kube-controller-manager [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf] ...
I1124 13:47:37.017118 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
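Each "Gathering logs for X" pair above is one iteration of a simple loop: list container IDs for a component with crictl ps, then tail each container's logs. A sketch of that flow (meant to run on the node; sudo and the absolute crictl path are elided):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// gather tails the last 400 log lines of every container whose name
// matches component, like the logs.go loop above.
func gather(component string) error {
	out, err := exec.Command("crictl", "ps", "-a", "--quiet", "--name="+component).Output()
	if err != nil {
		return err
	}
	for _, id := range strings.Fields(string(out)) {
		logs, err := exec.Command("crictl", "logs", "--tail", "400", id).CombinedOutput()
		if err != nil {
			return err
		}
		fmt.Printf("== %s [%s] ==\n%s", component, id, logs)
	}
	return nil
}

func main() {
	if err := gather("kube-apiserver"); err != nil {
		fmt.Println(err)
	}
}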
I1124 13:47:39.548013 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:47:39.548547 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
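The healthz loop above keeps probing https://192.168.76.2:8443/healthz and treats "connection refused" as "not up yet". A minimal polling sketch (assumption: plain net/http with TLS verification skipped stands in for minikube's api_server.go check):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// waitHealthz polls url until it returns 200 OK or the timeout expires,
// tolerating dial errors while the apiserver pod restarts.
func waitHealthz(url string, timeout time.Duration) error {
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for %s", url)
}

func main() {
	fmt.Println(waitHealthz("https://192.168.76.2:8443/healthz", time.Minute))
}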
I1124 13:47:39.548616 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:47:39.548676 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:47:39.577831 572647 cri.go:89] found id: "6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:39.577852 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:39.577857 572647 cri.go:89] found id: ""
I1124 13:47:39.577867 572647 logs.go:282] 2 containers: [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:47:39.577947 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.582354 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.586625 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:47:39.586710 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:47:39.614522 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:39.614543 572647 cri.go:89] found id: ""
I1124 13:47:39.614552 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:47:39.614607 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.619054 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:47:39.619127 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:47:39.646326 572647 cri.go:89] found id: ""
I1124 13:47:39.646352 572647 logs.go:282] 0 containers: []
W1124 13:47:39.646363 572647 logs.go:284] No container was found matching "coredns"
I1124 13:47:39.646370 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:47:39.646429 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:47:39.672725 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:39.672745 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:39.672749 572647 cri.go:89] found id: ""
I1124 13:47:39.672757 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:47:39.672814 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.677191 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.681175 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:47:39.681258 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:47:39.708431 572647 cri.go:89] found id: ""
I1124 13:47:39.708455 572647 logs.go:282] 0 containers: []
W1124 13:47:39.708464 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:47:39.708470 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:47:39.708519 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:47:39.740642 572647 cri.go:89] found id: "daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:39.740666 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:39.740672 572647 cri.go:89] found id: ""
I1124 13:47:39.740682 572647 logs.go:282] 2 containers: [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:47:39.740749 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.745558 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.749963 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:47:39.750090 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:47:39.785165 572647 cri.go:89] found id: ""
I1124 13:47:39.785200 572647 logs.go:282] 0 containers: []
W1124 13:47:39.785213 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:47:39.785223 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:47:39.785297 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:47:39.816314 572647 cri.go:89] found id: ""
I1124 13:47:39.816344 572647 logs.go:282] 0 containers: []
W1124 13:47:39.816356 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:47:39.816369 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:47:39.816386 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:39.855047 572647 logs.go:123] Gathering logs for kube-controller-manager [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf] ...
I1124 13:47:39.855082 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:39.884850 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:47:39.884886 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:39.923160 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:47:39.923209 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:47:40.011551 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:47:40.011587 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:47:40.028754 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:47:40.028784 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:47:40.073406 572647 logs.go:123] Gathering logs for container status ...
I1124 13:47:40.073463 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:47:40.118088 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:47:40.118130 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:47:40.186938 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:47:40.186963 572647 logs.go:123] Gathering logs for kube-apiserver [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8] ...
I1124 13:47:40.186979 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:40.225544 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:47:40.225575 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:40.264167 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:47:40.264212 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:40.310248 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:47:40.310285 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:40.101111 608917 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-608395
I1124 13:47:40.101196 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.122644 608917 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:40.122921 608917 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33441 <nil> <nil>}
I1124 13:47:40.122949 608917 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-608395' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-608395/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-608395' | sudo tee -a /etc/hosts;
fi
fi
I1124 13:47:40.280196 608917 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 13:47:40.280226 608917 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21932-370498/.minikube CaCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21932-370498/.minikube}
I1124 13:47:40.280268 608917 ubuntu.go:190] setting up certificates
I1124 13:47:40.280293 608917 provision.go:84] configureAuth start
I1124 13:47:40.280380 608917 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-608395
I1124 13:47:40.303469 608917 provision.go:143] copyHostCerts
I1124 13:47:40.303532 608917 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem, removing ...
I1124 13:47:40.303543 608917 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem
I1124 13:47:40.303590 608917 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem (1082 bytes)
I1124 13:47:40.303726 608917 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem, removing ...
I1124 13:47:40.303739 608917 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem
I1124 13:47:40.303772 608917 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem (1123 bytes)
I1124 13:47:40.303856 608917 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem, removing ...
I1124 13:47:40.303868 608917 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem
I1124 13:47:40.303892 608917 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem (1675 bytes)
I1124 13:47:40.303983 608917 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem org=jenkins.no-preload-608395 san=[127.0.0.1 192.168.103.2 localhost minikube no-preload-608395]
I1124 13:47:40.375070 608917 provision.go:177] copyRemoteCerts
I1124 13:47:40.375131 608917 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 13:47:40.375180 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.394610 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:47:40.501959 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1124 13:47:40.523137 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1124 13:47:40.542279 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1124 13:47:40.562226 608917 provision.go:87] duration metric: took 281.905194ms to configureAuth
I1124 13:47:40.562265 608917 ubuntu.go:206] setting minikube options for container-runtime
I1124 13:47:40.562572 608917 config.go:182] Loaded profile config "no-preload-608395": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 13:47:40.562595 608917 machine.go:97] duration metric: took 3.832793094s to provisionDockerMachine
I1124 13:47:40.562604 608917 client.go:176] duration metric: took 5.273718281s to LocalClient.Create
I1124 13:47:40.562649 608917 start.go:167] duration metric: took 5.273809151s to libmachine.API.Create "no-preload-608395"
I1124 13:47:40.562659 608917 start.go:293] postStartSetup for "no-preload-608395" (driver="docker")
I1124 13:47:40.562671 608917 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 13:47:40.562721 608917 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 13:47:40.562769 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.582715 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:47:40.688873 608917 ssh_runner.go:195] Run: cat /etc/os-release
I1124 13:47:40.692683 608917 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 13:47:40.692717 608917 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 13:47:40.692818 608917 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-370498/.minikube/addons for local assets ...
I1124 13:47:40.692947 608917 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-370498/.minikube/files for local assets ...
I1124 13:47:40.693078 608917 filesync.go:149] local asset: /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem -> 3741222.pem in /etc/ssl/certs
I1124 13:47:40.693208 608917 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 13:47:40.702139 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem --> /etc/ssl/certs/3741222.pem (1708 bytes)
I1124 13:47:40.725883 608917 start.go:296] duration metric: took 163.205649ms for postStartSetup
I1124 13:47:40.726376 608917 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-608395
I1124 13:47:40.744526 608917 profile.go:143] Saving config to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/config.json ...
I1124 13:47:40.745022 608917 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 13:47:40.745098 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.763260 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:47:40.869180 608917 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 13:47:40.874423 608917 start.go:128] duration metric: took 5.58807074s to createHost
I1124 13:47:40.874458 608917 start.go:83] releasing machines lock for "no-preload-608395", held for 5.58825096s
I1124 13:47:40.874540 608917 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-608395
I1124 13:47:40.896709 608917 ssh_runner.go:195] Run: cat /version.json
I1124 13:47:40.896763 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.896807 608917 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 13:47:40.896904 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.918859 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:47:40.920576 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:47:41.084454 608917 ssh_runner.go:195] Run: systemctl --version
I1124 13:47:41.091582 608917 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 13:47:41.097406 608917 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 13:47:41.097478 608917 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 13:47:41.125540 608917 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
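
The find/-exec mv above sidelines conflicting bridge/podman CNI configs by appending a .mk_disabled suffix instead of deleting them. A minimal sketch of undoing that by hand, assuming the same suffix convention:

    # re-enable any CNI config that was sidelined with the .mk_disabled suffix
    for f in /etc/cni/net.d/*.mk_disabled; do
      [ -e "$f" ] || continue            # glob matched nothing; skip
      sudo mv "$f" "${f%.mk_disabled}"
    done
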
I1124 13:47:41.125566 608917 start.go:496] detecting cgroup driver to use...
I1124 13:47:41.125601 608917 detect.go:190] detected "systemd" cgroup driver on host os
I1124 13:47:41.125650 608917 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 13:47:41.148294 608917 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 13:47:41.167664 608917 docker.go:218] disabling cri-docker service (if available) ...
I1124 13:47:41.167740 608917 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 13:47:41.189235 608917 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 13:47:41.213594 608917 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 13:47:41.336134 608917 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 13:47:41.426955 608917 docker.go:234] disabling docker service ...
I1124 13:47:41.427023 608917 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 13:47:41.448189 608917 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 13:47:41.462073 608917 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 13:47:41.548298 608917 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 13:47:41.635202 608917 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 13:47:41.649149 608917 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 13:47:41.664451 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1124 13:47:41.676460 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 13:47:41.686131 608917 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1124 13:47:41.686199 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1124 13:47:41.695720 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:47:41.705503 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 13:47:41.714879 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:47:41.724369 608917 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 13:47:41.733131 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 13:47:41.742525 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 13:47:41.751826 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 13:47:41.762473 608917 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 13:47:41.770755 608917 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 13:47:41.779154 608917 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:47:41.869150 608917 ssh_runner.go:195] Run: sudo systemctl restart containerd
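
Each sed above edits /etc/containerd/config.toml in place (sandbox image, systemd cgroup driver, runc v2 shim, CNI conf dir, unprivileged ports) before the daemon-reload and restart. A sketch for spot-checking that the edits took effect; the key names follow containerd's CRI plugin config:

    # spot-check the fields the sed edits are expected to have set
    sudo grep -nE 'SystemdCgroup|sandbox_image|conf_dir|enable_unprivileged_ports' /etc/containerd/config.toml
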
I1124 13:47:41.957807 608917 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 13:47:41.957876 608917 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 13:47:41.965431 608917 start.go:564] Will wait 60s for crictl version
I1124 13:47:41.965500 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:41.970973 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 13:47:42.001317 608917 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 13:47:42.001405 608917 ssh_runner.go:195] Run: containerd --version
I1124 13:47:42.026320 608917 ssh_runner.go:195] Run: containerd --version
I1124 13:47:42.052318 608917 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
I1124 13:47:40.896022 607669 cli_runner.go:164] Run: docker network inspect old-k8s-version-513442 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:47:40.918522 607669 ssh_runner.go:195] Run: grep 192.168.94.1 host.minikube.internal$ /etc/hosts
I1124 13:47:40.923315 607669 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.94.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
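
The one-liner above is an idempotent /etc/hosts update: filter out any stale tab-separated entry for the name, append the fresh mapping, and copy the temp file back under sudo. The same pattern as a reusable sketch (update_hosts is a hypothetical helper name):

    # drop any old "<ip><TAB>name" line, append the new one, install the result
    update_hosts() {
      local ip=$1 name=$2
      { grep -v $'\t'"${name}"'$' /etc/hosts; printf '%s\t%s\n' "$ip" "$name"; } > /tmp/h.$$
      sudo cp /tmp/h.$$ /etc/hosts
    }
    update_hosts 192.168.94.1 host.minikube.internal
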
I1124 13:47:40.935781 607669 kubeadm.go:884] updating cluster {Name:old-k8s-version-513442 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-513442 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 13:47:40.935932 607669 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 13:47:40.935998 607669 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 13:47:40.965650 607669 containerd.go:627] all images are preloaded for containerd runtime.
I1124 13:47:40.965689 607669 containerd.go:534] Images already preloaded, skipping extraction
I1124 13:47:40.965773 607669 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 13:47:40.999412 607669 containerd.go:627] all images are preloaded for containerd runtime.
I1124 13:47:40.999441 607669 cache_images.go:86] Images are preloaded, skipping loading
I1124 13:47:40.999451 607669 kubeadm.go:935] updating node { 192.168.94.2 8443 v1.28.0 containerd true true} ...
I1124 13:47:40.999568 607669 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-513442 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.94.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-513442 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1124 13:47:40.999640 607669 ssh_runner.go:195] Run: sudo crictl info
I1124 13:47:41.030216 607669 cni.go:84] Creating CNI manager for ""
I1124 13:47:41.030250 607669 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:47:41.030273 607669 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 13:47:41.030304 607669 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.94.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-513442 NodeName:old-k8s-version-513442 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.94.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.94.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 13:47:41.030479 607669 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.94.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "old-k8s-version-513442"
  kubeletExtraArgs:
    node-ip: 192.168.94.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.94.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1124 13:47:41.030593 607669 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1124 13:47:41.040496 607669 binaries.go:51] Found k8s binaries, skipping transfer
I1124 13:47:41.040574 607669 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 13:47:41.048965 607669 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1124 13:47:41.063246 607669 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 13:47:41.080199 607669 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2175 bytes)
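
The staged config can be validated before the real init below; kubeadm init accepts --dry-run, which renders manifests without changing the node (the same preflight errors may still need to be ignored). A sketch using the pinned binary path from this run:

    # validate the staged kubeadm config without mutating the node
    sudo /var/lib/minikube/binaries/v1.28.0/kubeadm init --config /var/tmp/minikube/kubeadm.yaml.new --dry-run
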
I1124 13:47:41.095141 607669 ssh_runner.go:195] Run: grep 192.168.94.2 control-plane.minikube.internal$ /etc/hosts
I1124 13:47:41.099735 607669 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.94.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 13:47:41.111816 607669 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:47:41.205774 607669 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 13:47:41.229647 607669 certs.go:69] Setting up /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442 for IP: 192.168.94.2
I1124 13:47:41.229678 607669 certs.go:195] generating shared ca certs ...
I1124 13:47:41.229702 607669 certs.go:227] acquiring lock for ca certs: {Name:mk5874497fda855b1e2ff816147ffdfbc44946ae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.229867 607669 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21932-370498/.minikube/ca.key
I1124 13:47:41.229906 607669 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.key
I1124 13:47:41.229935 607669 certs.go:257] generating profile certs ...
I1124 13:47:41.230010 607669 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.key
I1124 13:47:41.230025 607669 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.crt with IP's: []
I1124 13:47:41.438692 607669 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.crt ...
I1124 13:47:41.438735 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.crt: {Name:mkbb44e092f1569b20ffeeea6d19871e0c7ea39c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.438903 607669 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.key ...
I1124 13:47:41.438942 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.key: {Name:mkcdbea7ce1dc4681fc91bbc4b78d2c028c94687 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.439100 607669 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key.eabc0cb4
I1124 13:47:41.439127 607669 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt.eabc0cb4 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.94.2]
I1124 13:47:41.518895 607669 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt.eabc0cb4 ...
I1124 13:47:41.518941 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt.eabc0cb4: {Name:mk47b90333d21f736ed33504f6da28b133242551 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.519134 607669 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key.eabc0cb4 ...
I1124 13:47:41.519153 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key.eabc0cb4: {Name:mk4592466df77ceb7a68fa27e5f9a0201b1a8063 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.519239 607669 certs.go:382] copying /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt.eabc0cb4 -> /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt
I1124 13:47:41.519312 607669 certs.go:386] copying /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key.eabc0cb4 -> /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key
I1124 13:47:41.519368 607669 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.key
I1124 13:47:41.519388 607669 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.crt with IP's: []
I1124 13:47:41.757186 607669 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.crt ...
I1124 13:47:41.757217 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.crt: {Name:mkb434108adbee544176aebf04c9ed8a63b76175 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.757418 607669 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.key ...
I1124 13:47:41.757442 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.key: {Name:mk640e3789cee888121bd6cc947590ae24e90dd5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
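
The apiserver profile cert generated above is signed for the service VIP, loopback, and the node IP. A sketch for inspecting those SANs afterwards (assumes OpenSSL 1.1.1+ for the -ext option):

    # list the Subject Alternative Names baked into the apiserver cert
    openssl x509 -noout -ext subjectAltName \
      -in /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt
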
I1124 13:47:41.757683 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122.pem (1338 bytes)
W1124 13:47:41.757725 607669 certs.go:480] ignoring /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122_empty.pem, impossibly tiny 0 bytes
I1124 13:47:41.757736 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem (1679 bytes)
I1124 13:47:41.757777 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem (1082 bytes)
I1124 13:47:41.757814 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem (1123 bytes)
I1124 13:47:41.757849 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem (1675 bytes)
I1124 13:47:41.757940 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem (1708 bytes)
I1124 13:47:41.758610 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 13:47:41.778634 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 13:47:41.799349 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 13:47:41.825279 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1124 13:47:41.844900 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1124 13:47:41.865036 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1124 13:47:41.887428 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 13:47:41.912645 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1124 13:47:41.937284 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 13:47:41.966303 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122.pem --> /usr/share/ca-certificates/374122.pem (1338 bytes)
I1124 13:47:41.989056 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem --> /usr/share/ca-certificates/3741222.pem (1708 bytes)
I1124 13:47:42.011989 607669 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 13:47:42.027976 607669 ssh_runner.go:195] Run: openssl version
I1124 13:47:42.036340 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/3741222.pem && ln -fs /usr/share/ca-certificates/3741222.pem /etc/ssl/certs/3741222.pem"
I1124 13:47:42.046698 607669 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/3741222.pem
I1124 13:47:42.051406 607669 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 13:20 /usr/share/ca-certificates/3741222.pem
I1124 13:47:42.051481 607669 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/3741222.pem
I1124 13:47:42.089903 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/3741222.pem /etc/ssl/certs/3ec20f2e.0"
I1124 13:47:42.100357 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 13:47:42.110986 607669 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:42.115955 607669 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 13:14 /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:42.116031 607669 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:42.153310 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1124 13:47:42.163209 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/374122.pem && ln -fs /usr/share/ca-certificates/374122.pem /etc/ssl/certs/374122.pem"
I1124 13:47:42.173625 607669 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/374122.pem
I1124 13:47:42.178229 607669 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 13:20 /usr/share/ca-certificates/374122.pem
I1124 13:47:42.178308 607669 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/374122.pem
I1124 13:47:42.216281 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/374122.pem /etc/ssl/certs/51391683.0"
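
The test/ln pairs above implement OpenSSL's hashed-directory lookup: each CA in /etc/ssl/certs needs a <subject-hash>.0 symlink so verification can find it. The same step done by hand:

    # compute the subject hash and create the lookup symlink openssl expects
    h=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
    sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${h}.0"
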
I1124 13:47:42.228415 607669 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 13:47:42.232854 607669 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 13:47:42.232959 607669 kubeadm.go:401] StartCluster: {Name:old-k8s-version-513442 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-513442 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 13:47:42.233058 607669 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 13:47:42.233119 607669 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 13:47:42.262130 607669 cri.go:89] found id: ""
I1124 13:47:42.262225 607669 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 13:47:42.271622 607669 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 13:47:42.280568 607669 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 13:47:42.280637 607669 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 13:47:42.289222 607669 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 13:47:42.289241 607669 kubeadm.go:158] found existing configuration files:
I1124 13:47:42.289287 607669 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 13:47:42.297481 607669 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 13:47:42.297560 607669 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 13:47:42.306305 607669 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 13:47:42.315150 607669 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 13:47:42.315224 607669 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 13:47:42.324595 607669 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 13:47:42.333840 607669 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 13:47:42.333922 607669 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 13:47:42.344021 607669 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 13:47:42.355171 607669 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 13:47:42.355226 607669 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
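
The grep/rm pairs above clear any kubeconfig that does not already point at the expected control-plane endpoint, so kubeadm regenerates them cleanly. Condensed into one loop as a sketch:

    # remove each kubeconfig unless it already targets the expected endpoint
    for f in admin kubelet controller-manager scheduler; do
      sudo grep -q https://control-plane.minikube.internal:8443 "/etc/kubernetes/$f.conf" \
        || sudo rm -f "/etc/kubernetes/$f.conf"
    done
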
I1124 13:47:42.364345 607669 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 13:47:42.433190 607669 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1124 13:47:42.433270 607669 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 13:47:42.487608 607669 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 13:47:42.487695 607669 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1124 13:47:42.487758 607669 kubeadm.go:319] OS: Linux
I1124 13:47:42.487823 607669 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 13:47:42.487892 607669 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 13:47:42.487986 607669 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 13:47:42.488057 607669 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 13:47:42.488125 607669 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 13:47:42.488216 607669 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 13:47:42.488285 607669 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 13:47:42.488352 607669 kubeadm.go:319] CGROUPS_IO: enabled
I1124 13:47:42.585565 607669 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 13:47:42.585750 607669 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 13:47:42.585896 607669 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1124 13:47:42.762435 607669 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 13:47:42.054673 608917 cli_runner.go:164] Run: docker network inspect no-preload-608395 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:47:42.073094 608917 ssh_runner.go:195] Run: grep 192.168.103.1 host.minikube.internal$ /etc/hosts
I1124 13:47:42.078208 608917 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.103.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 13:47:42.089858 608917 kubeadm.go:884] updating cluster {Name:no-preload-608395 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-608395 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 13:47:42.090126 608917 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1124 13:47:42.090181 608917 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 13:47:42.117576 608917 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.34.1". assuming images are not preloaded.
I1124 13:47:42.117601 608917 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.34.1 registry.k8s.io/kube-controller-manager:v1.34.1 registry.k8s.io/kube-scheduler:v1.34.1 registry.k8s.io/kube-proxy:v1.34.1 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.6.4-0 registry.k8s.io/coredns/coredns:v1.12.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1124 13:47:42.117671 608917 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:42.117683 608917 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.117696 608917 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1124 13:47:42.117708 608917 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.117683 608917 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.117737 608917 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.117738 608917 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.117773 608917 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.119957 608917 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.120028 608917 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.120041 608917 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.120103 608917 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.120144 608917 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.120206 608917 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1124 13:47:42.120361 608917 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:42.120651 608917 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
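
Each "daemon lookup" miss above only means the image is absent from the local docker daemon; the loader then falls back to minikube's on-disk cache. The equivalent probe:

    # the same check the daemon lookup performs; a miss exits non-zero
    docker image inspect registry.k8s.io/pause:3.10.1 >/dev/null 2>&1 || echo "not in local daemon"
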
I1124 13:47:42.324599 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7"
I1124 13:47:42.324658 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.329752 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1124 13:47:42.329811 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1124 13:47:42.340410 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969"
I1124 13:47:42.340483 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.345994 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97"
I1124 13:47:42.346082 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.350632 608917 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7" in container runtime
I1124 13:47:42.350771 608917 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.350861 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.354889 608917 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1124 13:47:42.355021 608917 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1124 13:47:42.355078 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.365506 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f"
I1124 13:47:42.365584 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.370164 608917 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969" in container runtime
I1124 13:47:42.370246 608917 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.370299 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.371573 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.371569 608917 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97" in container runtime
I1124 13:47:42.371633 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 13:47:42.371663 608917 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.371700 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.383984 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.6.4-0" and sha "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115"
I1124 13:47:42.384064 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.391339 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813"
I1124 13:47:42.391424 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.394058 608917 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f" in container runtime
I1124 13:47:42.394107 608917 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.394139 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.394173 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.394139 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.410796 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 13:47:42.412029 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.415223 608917 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115" in container runtime
I1124 13:47:42.415273 608917 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.415318 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.430558 608917 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813" in container runtime
I1124 13:47:42.430610 608917 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.430661 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.432115 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.432240 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.432710 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.449068 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 13:47:42.451309 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.451333 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.451434 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.471426 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.471426 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.472006 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.507575 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1124 13:47:42.507696 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1124 13:47:42.507737 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.507752 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.507776 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1124 13:47:42.507812 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1124 13:47:42.512031 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1124 13:47:42.512160 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1124 13:47:42.512183 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.512220 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1124 13:47:42.512281 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1124 13:47:42.542249 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1124 13:47:42.542293 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (25966080 bytes)
I1124 13:47:42.542356 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.542419 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.542436 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1124 13:47:42.542450 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1124 13:47:42.542460 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (22394368 bytes)
I1124 13:47:42.542482 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1124 13:47:42.542522 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1124 13:47:42.542541 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (27073024 bytes)
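
Every transfer above is gated by one existence check: stat the target and copy from the local cache only when stat fails. As a standalone sketch, with a local cp standing in for the ssh transfer (copy_if_missing is a hypothetical name):

    # copy a cached image tarball only if it is not already present
    copy_if_missing() {
      local src=$1 dst=$2
      stat -c "%s %y" "$dst" >/dev/null 2>&1 || sudo cp "$src" "$dst"
    }
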
I1124 13:47:42.547506 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1124 13:47:42.547609 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1124 13:47:42.591222 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1124 13:47:42.591265 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1124 13:47:42.591339 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1124 13:47:42.591358 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1124 13:47:42.630891 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1124 13:47:42.630960 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (22831104 bytes)
I1124 13:47:42.635881 608917 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1124 13:47:42.635984 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1124 13:47:42.696822 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1124 13:47:42.696868 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (17396736 bytes)
I1124 13:47:42.696964 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1124 13:47:42.696987 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (74320896 bytes)
I1124 13:47:42.855586 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
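
Loading then follows a fixed two-step per image: stage the tarball under /var/lib/minikube/images, then import it into containerd's k8s.io namespace so the CRI (and thus the kubelet) can see it. Done by hand for the pause image:

    # import a staged tarball into the image namespace kubernetes uses
    sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
    sudo crictl images | grep pause     # confirm the CRI now sees it
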
I1124 13:47:43.017613 608917 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1124 13:47:43.017692 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1124 13:47:43.363331 608917 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1124 13:47:43.363429 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:44.322473 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.304751727s)
I1124 13:47:44.322506 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1124 13:47:44.322534 608917 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1124 13:47:44.322535 608917 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1124 13:47:44.322572 608917 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:44.322581 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
I1124 13:47:44.322611 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:44.327186 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:42.765072 607669 out.go:252] - Generating certificates and keys ...
I1124 13:47:42.765189 607669 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 13:47:42.765429 607669 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 13:47:42.918631 607669 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 13:47:43.145530 607669 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 13:47:43.262863 607669 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 13:47:43.516853 607669 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 13:47:43.680193 607669 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 13:47:43.680382 607669 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-513442] and IPs [192.168.94.2 127.0.0.1 ::1]
I1124 13:47:43.927450 607669 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 13:47:43.927668 607669 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-513442] and IPs [192.168.94.2 127.0.0.1 ::1]
I1124 13:47:44.210866 607669 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 13:47:44.444469 607669 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 13:47:44.571652 607669 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 13:47:44.571791 607669 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 13:47:44.658495 607669 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 13:47:44.899827 607669 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 13:47:45.259836 607669 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 13:47:45.407067 607669 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 13:47:45.407645 607669 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 13:47:45.412109 607669 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 13:47:42.868629 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:47:45.407011 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.084400483s)
I1124 13:47:45.407048 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1124 13:47:45.407074 608917 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1124 13:47:45.407121 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1124 13:47:45.407011 608917 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.079785919s)
I1124 13:47:45.407225 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:46.754417 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.347254819s)
I1124 13:47:46.754464 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1124 13:47:46.754487 608917 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1124 13:47:46.754539 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1124 13:47:46.754423 608917 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.34716741s)
I1124 13:47:46.754625 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:46.791381 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1124 13:47:46.791500 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1124 13:47:48.250258 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1: (1.49567347s)
I1124 13:47:48.250293 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1124 13:47:48.250322 608917 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1124 13:47:48.250369 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1124 13:47:48.250393 608917 ssh_runner.go:235] Completed: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: (1.458859359s)
I1124 13:47:48.250436 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1124 13:47:48.250458 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
I1124 13:47:49.525346 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (1.274952475s)
I1124 13:47:49.525372 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1124 13:47:49.525397 608917 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1124 13:47:49.525432 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
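[editor's note] Each `ctr -n=k8s.io images import` above appears twice: once as "Run:" (ssh_runner.go:195) and, when it is slow, once as "Completed: ... (duration)" (ssh_runner.go:235). A hedged sketch of that timing wrapper, assuming nothing about minikube internals beyond what the log shows:
-- go sketch --
package main

import (
	"log"
	"os/exec"
	"time"
)

// runTimed logs the command, runs it, and appends a duration suffix for
// slow commands, reproducing the Run/Completed pairs in the log above.
func runTimed(name string, args ...string) error {
	cmd := exec.Command(name, args...)
	log.Printf("Run: %s", cmd) // mirrors ssh_runner.go:195
	start := time.Now()
	err := cmd.Run()
	if d := time.Since(start); d > time.Second {
		// Only commands over a threshold get the "(1.3s)" suffix.
		log.Printf("Completed: %s: (%s)", cmd, d)
	}
	return err
}

func main() {
	_ = runTimed("sudo", "ctr", "-n=k8s.io", "images", "import",
		"/var/lib/minikube/images/etcd_3.6.4-0")
}
-- /go sketch --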
I1124 13:47:45.413783 607669 out.go:252] - Booting up control plane ...
I1124 13:47:45.414000 607669 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 13:47:45.414122 607669 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 13:47:45.415606 607669 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 13:47:45.433197 607669 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 13:47:45.434777 607669 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 13:47:45.434850 607669 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 13:47:45.555124 607669 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1124 13:47:47.870054 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 13:47:47.870131 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:47:47.870207 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:47:47.909612 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:47:47.909637 572647 cri.go:89] found id: "6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:47.909644 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:47.909649 572647 cri.go:89] found id: ""
I1124 13:47:47.909660 572647 logs.go:282] 3 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:47:47.909721 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:47.915163 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:47.920826 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:47.926251 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:47:47.926326 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:47:47.968362 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:47.968399 572647 cri.go:89] found id: ""
I1124 13:47:47.968412 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:47:47.968487 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:47.973840 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:47:47.973955 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:47:48.011120 572647 cri.go:89] found id: ""
I1124 13:47:48.011151 572647 logs.go:282] 0 containers: []
W1124 13:47:48.011163 572647 logs.go:284] No container was found matching "coredns"
I1124 13:47:48.011172 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:47:48.011242 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:47:48.049409 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:48.049433 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:48.049439 572647 cri.go:89] found id: ""
I1124 13:47:48.049449 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:47:48.049612 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:48.055041 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:48.061717 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:47:48.061795 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:47:48.098008 572647 cri.go:89] found id: ""
I1124 13:47:48.098036 572647 logs.go:282] 0 containers: []
W1124 13:47:48.098048 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:47:48.098056 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:47:48.098116 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:47:48.134832 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:47:48.134858 572647 cri.go:89] found id: "daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:48.134864 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:48.134868 572647 cri.go:89] found id: ""
I1124 13:47:48.134879 572647 logs.go:282] 3 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:47:48.134960 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:48.140512 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:48.146067 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:48.151167 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:47:48.151293 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:47:48.194241 572647 cri.go:89] found id: ""
I1124 13:47:48.194275 572647 logs.go:282] 0 containers: []
W1124 13:47:48.194287 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:47:48.194297 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:47:48.194366 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:47:48.235586 572647 cri.go:89] found id: ""
I1124 13:47:48.235617 572647 logs.go:282] 0 containers: []
W1124 13:47:48.235629 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:47:48.235644 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:47:48.235660 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:48.322131 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:47:48.322175 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:47:48.358925 572647 logs.go:123] Gathering logs for kube-controller-manager [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf] ...
I1124 13:47:48.358964 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:48.399403 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:47:48.399439 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:47:48.442576 572647 logs.go:123] Gathering logs for kube-apiserver [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8] ...
I1124 13:47:48.442621 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:48.490297 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:47:48.490336 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:48.543239 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:47:48.543277 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:48.591561 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:47:48.591604 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:48.639975 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:47:48.640012 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:47:48.703335 572647 logs.go:123] Gathering logs for container status ...
I1124 13:47:48.703393 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:47:48.760778 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:47:48.760820 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:47:48.887283 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:47:48.887328 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:47:48.915138 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:47:48.915177 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1124 13:47:50.557442 607669 kubeadm.go:319] [apiclient] All control plane components are healthy after 5.002632 seconds
I1124 13:47:50.557627 607669 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 13:47:50.572390 607669 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 13:47:51.098533 607669 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 13:47:51.098764 607669 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-513442 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 13:47:51.610053 607669 kubeadm.go:319] [bootstrap-token] Using token: eki30b.4i7191y9601t9kqb
I1124 13:47:51.611988 607669 out.go:252] - Configuring RBAC rules ...
I1124 13:47:51.612142 607669 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 13:47:51.618056 607669 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 13:47:51.627751 607669 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 13:47:51.631902 607669 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 13:47:51.635666 607669 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 13:47:51.643042 607669 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 13:47:51.655046 607669 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 13:47:51.879254 607669 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 13:47:52.022857 607669 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 13:47:52.024273 607669 kubeadm.go:319]
I1124 13:47:52.024439 607669 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 13:47:52.024451 607669 kubeadm.go:319]
I1124 13:47:52.024565 607669 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 13:47:52.024593 607669 kubeadm.go:319]
I1124 13:47:52.024628 607669 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 13:47:52.024712 607669 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 13:47:52.024786 607669 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 13:47:52.024795 607669 kubeadm.go:319]
I1124 13:47:52.024870 607669 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 13:47:52.024880 607669 kubeadm.go:319]
I1124 13:47:52.024984 607669 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 13:47:52.024995 607669 kubeadm.go:319]
I1124 13:47:52.025066 607669 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 13:47:52.025175 607669 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 13:47:52.025273 607669 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 13:47:52.025282 607669 kubeadm.go:319]
I1124 13:47:52.025399 607669 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 13:47:52.025508 607669 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 13:47:52.025517 607669 kubeadm.go:319]
I1124 13:47:52.025633 607669 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token eki30b.4i7191y9601t9kqb \
I1124 13:47:52.025782 607669 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:32fb1839a00503b33822b75b81c2f42d5061d18404c0a5cd12189dec7e20658c \
I1124 13:47:52.025814 607669 kubeadm.go:319] --control-plane
I1124 13:47:52.025823 607669 kubeadm.go:319]
I1124 13:47:52.025955 607669 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 13:47:52.025964 607669 kubeadm.go:319]
I1124 13:47:52.026081 607669 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token eki30b.4i7191y9601t9kqb \
I1124 13:47:52.026226 607669 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:32fb1839a00503b33822b75b81c2f42d5061d18404c0a5cd12189dec7e20658c
I1124 13:47:52.029215 607669 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1124 13:47:52.029395 607669 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1124 13:47:52.029436 607669 cni.go:84] Creating CNI manager for ""
I1124 13:47:52.029450 607669 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:47:52.032075 607669 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 13:47:52.378094 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (2.852631537s)
I1124 13:47:52.378131 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 from cache
I1124 13:47:52.378164 608917 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1124 13:47:52.378216 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1124 13:47:52.826755 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1124 13:47:52.826808 608917 cache_images.go:125] Successfully loaded all cached images
I1124 13:47:52.826816 608917 cache_images.go:94] duration metric: took 10.70919772s to LoadCachedImages
I1124 13:47:52.826831 608917 kubeadm.go:935] updating node { 192.168.103.2 8443 v1.34.1 containerd true true} ...
I1124 13:47:52.826984 608917 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-608395 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-608395 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
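[editor's note] The kubelet unit dumped above (kubeadm.go:947) is parameterized only by the runtime, Kubernetes version, node name, and node IP. A speculative reconstruction of how such a drop-in could be rendered with text/template; the template text matches the logged unit, but the Go types and field names are illustrative, not minikube's.
-- go sketch --
package main

import (
	"os"
	"text/template"
)

const unit = `[Unit]
Wants={{.Runtime}}.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/{{.Version}}/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override={{.Node}} --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip={{.IP}}
[Install]
`

func main() {
	t := template.Must(template.New("kubelet").Parse(unit))
	// Values taken from the log lines above.
	_ = t.Execute(os.Stdout, map[string]string{
		"Runtime": "containerd",
		"Version": "v1.34.1",
		"Node":    "no-preload-608395",
		"IP":      "192.168.103.2",
	})
}
-- /go sketch --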
I1124 13:47:52.827057 608917 ssh_runner.go:195] Run: sudo crictl info
I1124 13:47:52.858503 608917 cni.go:84] Creating CNI manager for ""
I1124 13:47:52.858531 608917 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:47:52.858557 608917 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 13:47:52.858588 608917 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.103.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-608395 NodeName:no-preload-608395 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.103.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.103.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 13:47:52.858757 608917 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.103.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "no-preload-608395"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.103.2"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.103.2"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
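[editor's note] The rendered kubeadm.yaml above bundles four API objects (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration) in one file. A stdlib-only sketch that splits such a multi-document file on YAML's `---` separator and reports each document's kind; it assumes nothing beyond the file layout shown above.
-- go sketch --
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	data, err := os.ReadFile("/var/tmp/minikube/kubeadm.yaml")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// Documents are separated by a line containing only "---".
	for i, doc := range strings.Split(string(data), "\n---\n") {
		kind := "unknown"
		for _, line := range strings.Split(doc, "\n") {
			if strings.HasPrefix(line, "kind: ") {
				kind = strings.TrimPrefix(line, "kind: ")
				break
			}
		}
		fmt.Printf("document %d: %s\n", i, kind)
	}
}
-- /go sketch --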
I1124 13:47:52.858835 608917 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1124 13:47:52.869416 608917 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1124 13:47:52.869483 608917 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1124 13:47:52.881260 608917 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm.sha256 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/linux/amd64/v1.34.1/kubeadm
I1124 13:47:52.881274 608917 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
I1124 13:47:52.881284 608917 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet.sha256 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/linux/amd64/v1.34.1/kubelet
I1124 13:47:52.881370 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1124 13:47:52.886648 608917 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1124 13:47:52.886683 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/linux/amd64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (60559544 bytes)
I1124 13:47:53.829310 608917 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 13:47:53.844364 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1124 13:47:53.848663 608917 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1124 13:47:53.848703 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/linux/amd64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (59195684 bytes)
I1124 13:47:54.078871 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1124 13:47:54.083904 608917 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1124 13:47:54.083971 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/linux/amd64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (74027192 bytes)
I1124 13:47:54.263727 608917 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 13:47:54.272819 608917 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (322 bytes)
I1124 13:47:54.287533 608917 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 13:47:54.307319 608917 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2232 bytes)
I1124 13:47:54.321728 608917 ssh_runner.go:195] Run: grep 192.168.103.2 control-plane.minikube.internal$ /etc/hosts
I1124 13:47:54.326108 608917 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.103.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 13:47:54.337568 608917 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:47:54.423252 608917 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 13:47:54.446892 608917 certs.go:69] Setting up /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395 for IP: 192.168.103.2
I1124 13:47:54.446932 608917 certs.go:195] generating shared ca certs ...
I1124 13:47:54.446950 608917 certs.go:227] acquiring lock for ca certs: {Name:mk5874497fda855b1e2ff816147ffdfbc44946ae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.447115 608917 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21932-370498/.minikube/ca.key
I1124 13:47:54.447173 608917 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.key
I1124 13:47:54.447189 608917 certs.go:257] generating profile certs ...
I1124 13:47:54.447250 608917 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.key
I1124 13:47:54.447265 608917 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.crt with IP's: []
I1124 13:47:54.480111 608917 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.crt ...
I1124 13:47:54.480143 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.crt: {Name:mk0373d89f453529126dca865f8c4273a9b76c80 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.480318 608917 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.key ...
I1124 13:47:54.480326 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.key: {Name:mkd4fd6c97a850045d4415dcd6682504ca05b6b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.480412 608917 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key.211f6cd0
I1124 13:47:54.480432 608917 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt.211f6cd0 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2]
I1124 13:47:54.564575 608917 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt.211f6cd0 ...
I1124 13:47:54.564606 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt.211f6cd0: {Name:mk39921501aaa8b9dfdaa0c59584189fbc232834 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.564812 608917 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key.211f6cd0 ...
I1124 13:47:54.564832 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key.211f6cd0: {Name:mk1e5ec23cae444088ab39a7c9f4bd7f0b68695e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.565002 608917 certs.go:382] copying /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt.211f6cd0 -> /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt
I1124 13:47:54.565092 608917 certs.go:386] copying /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key.211f6cd0 -> /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key
I1124 13:47:54.565147 608917 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.key
I1124 13:47:54.565166 608917 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.crt with IP's: []
I1124 13:47:54.682010 608917 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.crt ...
I1124 13:47:54.682042 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.crt: {Name:mk61707e6277a856c1f1cee667479489cd8cfc56 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.682251 608917 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.key ...
I1124 13:47:54.682270 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.key: {Name:mkdc07f88aff1f58330c9757ac629acf2062c9ed Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
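[editor's note] The profile-cert steps above (certs.go:364, crypto.go:68/156/164) sign each cert with the shared cluster CA; the apiserver cert additionally embeds the four IP SANs shown in the log. A compact crypto/x509 sketch of that flow; the key size, validity periods, and the throwaway in-memory CA are assumptions for brevity, standing in for the cached ca.crt/ca.key.
-- go sketch --
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Throwaway CA standing in for the cached minikubeCA key pair.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Serving cert with the same IP SANs as the "minikube" profile cert.
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().AddDate(3, 0, 0),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses: []net.IP{
			net.ParseIP("10.96.0.1"), net.ParseIP("127.0.0.1"),
			net.ParseIP("10.0.0.1"), net.ParseIP("192.168.103.2"),
		},
	}
	der, _ := x509.CreateCertificate(rand.Reader, tmpl, caCert, &key.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}
-- /go sketch --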
I1124 13:47:54.682520 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122.pem (1338 bytes)
W1124 13:47:54.682564 608917 certs.go:480] ignoring /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122_empty.pem, impossibly tiny 0 bytes
I1124 13:47:54.682574 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem (1679 bytes)
I1124 13:47:54.682602 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem (1082 bytes)
I1124 13:47:54.682626 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem (1123 bytes)
I1124 13:47:54.682651 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem (1675 bytes)
I1124 13:47:54.682697 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem (1708 bytes)
I1124 13:47:54.683371 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 13:47:54.703387 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 13:47:54.722770 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 13:47:54.743107 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1124 13:47:54.763697 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1124 13:47:54.783164 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1124 13:47:54.802752 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 13:47:54.822653 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1124 13:47:54.843126 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122.pem --> /usr/share/ca-certificates/374122.pem (1338 bytes)
I1124 13:47:54.867619 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem --> /usr/share/ca-certificates/3741222.pem (1708 bytes)
I1124 13:47:54.887814 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 13:47:54.907876 608917 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 13:47:54.922379 608917 ssh_runner.go:195] Run: openssl version
I1124 13:47:54.929636 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/374122.pem && ln -fs /usr/share/ca-certificates/374122.pem /etc/ssl/certs/374122.pem"
I1124 13:47:54.940237 608917 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/374122.pem
I1124 13:47:54.944856 608917 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 13:20 /usr/share/ca-certificates/374122.pem
I1124 13:47:54.944961 608917 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/374122.pem
I1124 13:47:54.983788 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/374122.pem /etc/ssl/certs/51391683.0"
I1124 13:47:54.994031 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/3741222.pem && ln -fs /usr/share/ca-certificates/3741222.pem /etc/ssl/certs/3741222.pem"
I1124 13:47:55.004849 608917 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/3741222.pem
I1124 13:47:55.010168 608917 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 13:20 /usr/share/ca-certificates/3741222.pem
I1124 13:47:55.010231 608917 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/3741222.pem
I1124 13:47:55.048930 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/3741222.pem /etc/ssl/certs/3ec20f2e.0"
I1124 13:47:55.058618 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 13:47:55.068496 608917 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:52.033462 607669 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 13:47:52.040052 607669 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1124 13:47:52.040080 607669 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 13:47:52.058896 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 13:47:52.863538 607669 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 13:47:52.863612 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:52.863691 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-513442 minikube.k8s.io/updated_at=2025_11_24T13_47_52_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=b5d1c9f4e75f4e638a533695fd62619949cefcab minikube.k8s.io/name=old-k8s-version-513442 minikube.k8s.io/primary=true
I1124 13:47:52.876635 607669 ops.go:34] apiserver oom_adj: -16
I1124 13:47:52.948231 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:53.449196 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:53.948546 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:54.448277 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:54.949098 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:55.073505 608917 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 13:14 /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:55.073568 608917 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:55.110353 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1124 13:47:55.120226 608917 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 13:47:55.124508 608917 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 13:47:55.124574 608917 kubeadm.go:401] StartCluster: {Name:no-preload-608395 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-608395 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 13:47:55.124676 608917 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 13:47:55.124734 608917 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 13:47:55.153610 608917 cri.go:89] found id: ""
I1124 13:47:55.153686 608917 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 13:47:55.163237 608917 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 13:47:55.172281 608917 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 13:47:55.172352 608917 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 13:47:55.181432 608917 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 13:47:55.181458 608917 kubeadm.go:158] found existing configuration files:
I1124 13:47:55.181515 608917 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 13:47:55.190814 608917 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 13:47:55.190897 608917 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 13:47:55.200577 608917 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 13:47:55.210272 608917 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 13:47:55.210344 608917 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 13:47:55.219990 608917 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 13:47:55.228828 608917 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 13:47:55.228885 608917 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 13:47:55.238104 608917 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 13:47:55.246631 608917 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 13:47:55.246745 608917 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
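[editor's note] The four grep/rm pairs above (kubeadm.go:164) apply one rule: keep a kubeconfig only if it already points at the expected control-plane endpoint, otherwise remove it so kubeadm regenerates it. Stated directly in Go (the helper name is illustrative):
-- go sketch --
package main

import (
	"fmt"
	"os"
	"strings"
)

// cleanStaleConfigs removes any kubeconfig that does not mention the
// expected endpoint, mirroring the grep/rm sequence in the log above.
func cleanStaleConfigs(endpoint string, files []string) {
	for _, f := range files {
		data, err := os.ReadFile(f)
		if err != nil || !strings.Contains(string(data), endpoint) {
			fmt.Printf("%q may not be in %s - will remove\n", endpoint, f)
			os.Remove(f) // rm -f semantics: ignore errors for missing files
		}
	}
}

func main() {
	cleanStaleConfigs("https://control-plane.minikube.internal:8443", []string{
		"/etc/kubernetes/admin.conf",
		"/etc/kubernetes/kubelet.conf",
		"/etc/kubernetes/controller-manager.conf",
		"/etc/kubernetes/scheduler.conf",
	})
}
-- /go sketch --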
I1124 13:47:55.255509 608917 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 13:47:55.316154 608917 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1124 13:47:55.376542 608917 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1124 13:47:55.448626 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:55.949156 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:56.449055 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:56.949140 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:57.448946 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:57.948732 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:58.448437 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:58.948803 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:59.449172 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:59.948946 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:59.001079 572647 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.085873793s)
W1124 13:47:59.001127 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Unable to connect to the server: net/http: TLS handshake timeout
output:
** stderr **
Unable to connect to the server: net/http: TLS handshake timeout
** /stderr **
I1124 13:47:59.001145 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:47:59.001163 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:00.448856 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:00.948957 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:01.448664 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:01.948985 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:02.448486 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:02.948890 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:03.448380 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:03.948515 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:04.448564 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:04.527535 607669 kubeadm.go:1114] duration metric: took 11.66399569s to wait for elevateKubeSystemPrivileges
I1124 13:48:04.527576 607669 kubeadm.go:403] duration metric: took 22.29462596s to StartCluster
I1124 13:48:04.527612 607669 settings.go:142] acquiring lock: {Name:mka599a3c9bae62ffb84d261186583052ce40f68 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:48:04.527702 607669 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21932-370498/kubeconfig
I1124 13:48:04.529054 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/kubeconfig: {Name:mk44e8f04ffd8592063c19ad1e339ad14aaa66a2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:48:04.529299 607669 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 13:48:04.529306 607669 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 13:48:04.529383 607669 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 13:48:04.529498 607669 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-513442"
I1124 13:48:04.529517 607669 config.go:182] Loaded profile config "old-k8s-version-513442": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 13:48:04.529519 607669 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-513442"
I1124 13:48:04.529535 607669 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-513442"
I1124 13:48:04.529561 607669 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-513442"
I1124 13:48:04.529641 607669 host.go:66] Checking if "old-k8s-version-513442" exists ...
I1124 13:48:04.529946 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:48:04.530180 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:48:04.531152 607669 out.go:179] * Verifying Kubernetes components...
I1124 13:48:04.532717 607669 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:48:04.557008 607669 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:48:04.558405 607669 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 13:48:04.558429 607669 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 13:48:04.558495 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:48:04.562314 607669 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-513442"
I1124 13:48:04.562381 607669 host.go:66] Checking if "old-k8s-version-513442" exists ...
I1124 13:48:04.563175 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:48:04.584062 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:48:04.598587 607669 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 13:48:04.598613 607669 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 13:48:04.598683 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:48:04.628606 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:48:04.653771 607669 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.94.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 13:48:04.701037 607669 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 13:48:04.714197 607669 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 13:48:04.765729 607669 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 13:48:04.912320 607669 start.go:977] {"host.minikube.internal": 192.168.94.1} host record injected into CoreDNS's ConfigMap
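The sed pipeline at 13:48:04.653771 splices a "hosts" block into the CoreDNS Corefile so in-cluster DNS resolves host.minikube.internal to the host gateway (192.168.94.1 here). A sketch of how the injection could be confirmed from the host, assuming kubectl and the test's context are available:

    # Print the hosts block from the live CoreDNS Corefile.
    kubectl --context old-k8s-version-513442 -n kube-system \
      get configmap coredns -o jsonpath='{.data.Corefile}' | grep -A3 'hosts {'
    # Expected fragment, per the sed expression above:
    #    hosts {
    #       192.168.94.1 host.minikube.internal
    #       fallthrough
    #    }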
I1124 13:48:04.913621 607669 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-513442" to be "Ready" ...
I1124 13:48:05.136398 607669 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1124 13:48:05.160590 608917 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1124 13:48:05.160664 608917 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 13:48:05.160771 608917 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 13:48:05.160854 608917 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1124 13:48:05.160886 608917 kubeadm.go:319] OS: Linux
I1124 13:48:05.160993 608917 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 13:48:05.161038 608917 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 13:48:05.161128 608917 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 13:48:05.161215 608917 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 13:48:05.161290 608917 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 13:48:05.161348 608917 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 13:48:05.161407 608917 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 13:48:05.161478 608917 kubeadm.go:319] CGROUPS_IO: enabled
I1124 13:48:05.161607 608917 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 13:48:05.161758 608917 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 13:48:05.161894 608917 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1124 13:48:05.162009 608917 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 13:48:05.163691 608917 out.go:252] - Generating certificates and keys ...
I1124 13:48:05.163805 608917 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 13:48:05.163947 608917 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 13:48:05.164054 608917 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 13:48:05.164154 608917 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 13:48:05.164250 608917 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 13:48:05.164325 608917 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 13:48:05.164403 608917 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 13:48:05.164579 608917 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-608395] and IPs [192.168.103.2 127.0.0.1 ::1]
I1124 13:48:05.164662 608917 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 13:48:05.164844 608917 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-608395] and IPs [192.168.103.2 127.0.0.1 ::1]
I1124 13:48:05.164993 608917 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 13:48:05.165088 608917 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 13:48:05.165130 608917 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 13:48:05.165182 608917 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 13:48:05.165250 608917 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 13:48:05.165313 608917 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1124 13:48:05.165382 608917 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 13:48:05.165456 608917 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 13:48:05.165506 608917 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 13:48:05.165580 608917 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 13:48:05.165637 608917 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 13:48:05.167858 608917 out.go:252] - Booting up control plane ...
I1124 13:48:05.167962 608917 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 13:48:05.168043 608917 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 13:48:05.168104 608917 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 13:48:05.168199 608917 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 13:48:05.168298 608917 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1124 13:48:05.168436 608917 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1124 13:48:05.168514 608917 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 13:48:05.168558 608917 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 13:48:05.168715 608917 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1124 13:48:05.168854 608917 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1124 13:48:05.168953 608917 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.001985013s
I1124 13:48:05.169093 608917 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1124 13:48:05.169202 608917 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.103.2:8443/livez
I1124 13:48:05.169339 608917 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1124 13:48:05.169461 608917 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1124 13:48:05.169582 608917 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 2.171045551s
I1124 13:48:05.169691 608917 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 2.746683308s
I1124 13:48:05.169782 608917 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 5.002983514s
I1124 13:48:05.169958 608917 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 13:48:05.170079 608917 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 13:48:05.170136 608917 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 13:48:05.170449 608917 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-608395 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 13:48:05.170534 608917 kubeadm.go:319] [bootstrap-token] Using token: 0m3tk6.bp5t9g266aj6zg5e
I1124 13:48:05.172344 608917 out.go:252] - Configuring RBAC rules ...
I1124 13:48:05.172497 608917 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 13:48:05.172606 608917 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 13:48:05.172790 608917 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 13:48:05.172947 608917 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 13:48:05.173067 608917 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 13:48:05.173152 608917 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 13:48:05.173251 608917 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 13:48:05.173290 608917 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 13:48:05.173330 608917 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 13:48:05.173336 608917 kubeadm.go:319]
I1124 13:48:05.173391 608917 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 13:48:05.173397 608917 kubeadm.go:319]
I1124 13:48:05.173470 608917 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 13:48:05.173476 608917 kubeadm.go:319]
I1124 13:48:05.173498 608917 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 13:48:05.173553 608917 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 13:48:05.173610 608917 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 13:48:05.173623 608917 kubeadm.go:319]
I1124 13:48:05.173669 608917 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 13:48:05.173675 608917 kubeadm.go:319]
I1124 13:48:05.173718 608917 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 13:48:05.173727 608917 kubeadm.go:319]
I1124 13:48:05.173778 608917 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 13:48:05.173858 608917 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 13:48:05.173981 608917 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 13:48:05.173990 608917 kubeadm.go:319]
I1124 13:48:05.174085 608917 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 13:48:05.174165 608917 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 13:48:05.174170 608917 kubeadm.go:319]
I1124 13:48:05.174250 608917 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 0m3tk6.bp5t9g266aj6zg5e \
I1124 13:48:05.174352 608917 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:32fb1839a00503b33822b75b81c2f42d5061d18404c0a5cd12189dec7e20658c \
I1124 13:48:05.174376 608917 kubeadm.go:319] --control-plane
I1124 13:48:05.174381 608917 kubeadm.go:319]
I1124 13:48:05.174459 608917 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 13:48:05.174465 608917 kubeadm.go:319]
I1124 13:48:05.174560 608917 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 0m3tk6.bp5t9g266aj6zg5e \
I1124 13:48:05.174802 608917 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:32fb1839a00503b33822b75b81c2f42d5061d18404c0a5cd12189dec7e20658c
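The --discovery-token-ca-cert-hash printed above can be recomputed from the cluster CA on the control-plane node using the standard kubeadm openssl recipe. The cert path below is an assumption based on the certificateDir reported earlier in this log (/var/lib/minikube/certs):

    # Recompute the CA cert hash: sha256 of the DER-encoded public key.
    openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
      | openssl rsa -pubin -outform der 2>/dev/null \
      | openssl dgst -sha256 -hex | sed 's/^.* //'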
I1124 13:48:05.174826 608917 cni.go:84] Creating CNI manager for ""
I1124 13:48:05.174836 608917 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:48:05.177484 608917 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 13:48:05.137677 607669 addons.go:530] duration metric: took 608.290782ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1124 13:48:01.553682 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:02.346718 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": read tcp 192.168.76.1:51122->192.168.76.2:8443: read: connection reset by peer
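The failing healthz probe can be approximated by hand; a rough sketch, to be run from inside the node (e.g. via minikube ssh), with the CA path an assumption based on the certificateDir above and -k as a verification-free fallback:

    # Probe the apiserver's /healthz endpoint with a short timeout.
    curl --max-time 5 --cacert /var/lib/minikube/certs/ca.crt \
      https://192.168.76.2:8443/healthz
    # Or, skipping TLS verification for a quick liveness check:
    curl -k --max-time 5 https://192.168.76.2:8443/healthz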
I1124 13:48:02.346797 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:02.346868 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:02.379430 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:02.379461 572647 cri.go:89] found id: "6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:48:02.379468 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:02.379472 572647 cri.go:89] found id: ""
I1124 13:48:02.379481 572647 logs.go:282] 3 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:02.379554 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.384666 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.389028 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.393413 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:02.393493 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:02.423298 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:02.423317 572647 cri.go:89] found id: ""
I1124 13:48:02.423325 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:02.423377 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.428323 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:02.428396 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:02.458971 572647 cri.go:89] found id: ""
I1124 13:48:02.459002 572647 logs.go:282] 0 containers: []
W1124 13:48:02.459014 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:02.459023 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:02.459136 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:02.495221 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:02.495253 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:02.495258 572647 cri.go:89] found id: ""
I1124 13:48:02.495267 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:02.495325 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.504536 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.513709 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:02.513782 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:02.545556 572647 cri.go:89] found id: ""
I1124 13:48:02.545589 572647 logs.go:282] 0 containers: []
W1124 13:48:02.545603 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:02.545613 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:02.545686 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:02.575683 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:02.575710 572647 cri.go:89] found id: "daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:48:02.575714 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:02.575717 572647 cri.go:89] found id: ""
I1124 13:48:02.575725 572647 logs.go:282] 3 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:02.575799 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.580340 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.584784 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.588717 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:02.588774 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:02.617522 572647 cri.go:89] found id: ""
I1124 13:48:02.617550 572647 logs.go:282] 0 containers: []
W1124 13:48:02.617558 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:02.617567 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:02.617616 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:02.647375 572647 cri.go:89] found id: ""
I1124 13:48:02.647407 572647 logs.go:282] 0 containers: []
W1124 13:48:02.647418 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:02.647432 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:02.647445 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:02.685850 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:02.685900 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:02.794118 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:02.794164 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:02.866960  572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:02.866982 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:02.866997 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:02.908627 572647 logs.go:123] Gathering logs for kube-apiserver [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8] ...
I1124 13:48:02.908671 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:48:02.949348 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:02.949380 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:02.997498 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:02.997541 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:03.065816 572647 logs.go:123] Gathering logs for kube-controller-manager [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf] ...
I1124 13:48:03.065856 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:48:03.101360 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:03.101393 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:03.140140 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:03.140183 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:03.160020 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:03.160058 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:03.202092 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:03.202136 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:03.247020 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:03.247060 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:03.283475 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:03.283518 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:05.832996 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:05.833478 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:05.833543 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:05.833607 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:05.862229 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:05.862254 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:05.862258 572647 cri.go:89] found id: ""
I1124 13:48:05.862267 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:05.862320 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:05.867091 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:05.871378 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:05.871455 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:05.900338 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:05.900361 572647 cri.go:89] found id: ""
I1124 13:48:05.900370 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:05.900428 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:05.904531 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:05.904606 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:05.933536 572647 cri.go:89] found id: ""
I1124 13:48:05.933565 572647 logs.go:282] 0 containers: []
W1124 13:48:05.933579 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:05.933587 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:05.933645 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:05.961942 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:05.961966 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:05.961980 572647 cri.go:89] found id: ""
I1124 13:48:05.961988 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:05.962048 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:05.966413 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:05.970560 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:05.970640 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:05.999021 572647 cri.go:89] found id: ""
I1124 13:48:05.999046 572647 logs.go:282] 0 containers: []
W1124 13:48:05.999057 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:05.999065 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:05.999125 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:06.030192 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:06.030216 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:06.030222 572647 cri.go:89] found id: ""
I1124 13:48:06.030233 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:06.030291 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:06.034509 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:06.038518 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:06.038602 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:06.067432 572647 cri.go:89] found id: ""
I1124 13:48:06.067459 572647 logs.go:282] 0 containers: []
W1124 13:48:06.067469 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:06.067477 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:06.067557 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:06.098683 572647 cri.go:89] found id: ""
I1124 13:48:06.098712 572647 logs.go:282] 0 containers: []
W1124 13:48:06.098723 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:06.098736 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:06.098753 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:06.163737  572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:06.163765 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:06.163783 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:05.179143 608917 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 13:48:05.184780 608917 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1124 13:48:05.184802 608917 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 13:48:05.199547 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 13:48:05.451312 608917 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 13:48:05.451481 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:05.451599 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-608395 minikube.k8s.io/updated_at=2025_11_24T13_48_05_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=b5d1c9f4e75f4e638a533695fd62619949cefcab minikube.k8s.io/name=no-preload-608395 minikube.k8s.io/primary=true
I1124 13:48:05.479434 608917 ops.go:34] apiserver oom_adj: -16
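The -16 read above is the legacy oom_adj knob, which tells the kernel's OOM killer to strongly avoid the apiserver. Both the legacy and the current interface can be inspected directly; a sketch, assuming pgrep matches exactly one process:

    cat /proc/"$(pgrep kube-apiserver)"/oom_adj        # legacy scale: -17..15
    cat /proc/"$(pgrep kube-apiserver)"/oom_score_adj  # current scale: -1000..1000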
I1124 13:48:05.560179 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:06.061204 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:06.560802 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:07.061219 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:07.561139 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:08.061015 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:08.561034 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:09.061268 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:09.560397 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:09.636185 608917 kubeadm.go:1114] duration metric: took 4.184744627s to wait for elevateKubeSystemPrivileges
I1124 13:48:09.636235 608917 kubeadm.go:403] duration metric: took 14.511667218s to StartCluster
I1124 13:48:09.636257 608917 settings.go:142] acquiring lock: {Name:mka599a3c9bae62ffb84d261186583052ce40f68 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:48:09.636332 608917 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21932-370498/kubeconfig
I1124 13:48:09.637980 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/kubeconfig: {Name:mk44e8f04ffd8592063c19ad1e339ad14aaa66a2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:48:09.638233 608917 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 13:48:09.638262 608917 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 13:48:09.638340 608917 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 13:48:09.638439 608917 addons.go:70] Setting storage-provisioner=true in profile "no-preload-608395"
I1124 13:48:09.638460 608917 addons.go:239] Setting addon storage-provisioner=true in "no-preload-608395"
I1124 13:48:09.638459 608917 addons.go:70] Setting default-storageclass=true in profile "no-preload-608395"
I1124 13:48:09.638486 608917 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-608395"
I1124 13:48:09.638512 608917 host.go:66] Checking if "no-preload-608395" exists ...
I1124 13:48:09.638608 608917 config.go:182] Loaded profile config "no-preload-608395": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 13:48:09.638889 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:48:09.639090 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:48:09.640719 608917 out.go:179] * Verifying Kubernetes components...
I1124 13:48:09.642235 608917 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:48:09.665980 608917 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:48:09.668239 608917 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 13:48:09.668262 608917 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 13:48:09.668334 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:48:09.668545 608917 addons.go:239] Setting addon default-storageclass=true in "no-preload-608395"
I1124 13:48:09.668594 608917 host.go:66] Checking if "no-preload-608395" exists ...
I1124 13:48:09.669115 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:48:09.708052 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:48:09.711213 608917 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 13:48:09.711236 608917 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 13:48:09.711297 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:48:09.737250 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:48:09.745340 608917 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 13:48:09.808489 608917 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 13:48:09.832661 608917 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 13:48:09.863280 608917 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 13:48:09.941101 608917 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
I1124 13:48:09.942521 608917 node_ready.go:35] waiting up to 6m0s for node "no-preload-608395" to be "Ready" ...
I1124 13:48:10.163475 608917 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1124 13:48:05.418106 607669 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-513442" context rescaled to 1 replicas
W1124 13:48:06.917478 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
W1124 13:48:09.417409 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
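The node_ready.go retry loop above amounts to waiting on the node's Ready condition; roughly the same 6m0s wait can be expressed with kubectl directly, using the context name from this test:

    # Wait up to 6 minutes for the node to report Ready.
    kubectl --context old-k8s-version-513442 wait --for=condition=Ready \
      node/old-k8s-version-513442 --timeout=6m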
I1124 13:48:06.199640 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:06.199675 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:06.235793 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:06.235827 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:06.290172 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:06.290212 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:06.325935 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:06.325975 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:06.359485 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:06.359523 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:06.406787 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:06.406834 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:06.503206 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:06.503251 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:06.520877 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:06.520924 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:06.561472 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:06.561510 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:06.591722 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:06.591748 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:09.128043 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:09.128549 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:09.128609 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:09.128678 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:09.158194 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:09.158216 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:09.158220 572647 cri.go:89] found id: ""
I1124 13:48:09.158229 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:09.158308 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.162575 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.167402 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:09.167472 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:09.196608 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:09.196633 572647 cri.go:89] found id: ""
I1124 13:48:09.196645 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:09.196709 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.201107 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:09.201190 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:09.232265 572647 cri.go:89] found id: ""
I1124 13:48:09.232300 572647 logs.go:282] 0 containers: []
W1124 13:48:09.232311 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:09.232320 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:09.232386 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:09.272990 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:09.273017 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:09.273022 572647 cri.go:89] found id: ""
I1124 13:48:09.273033 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:09.273100 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.278614 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.283409 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:09.283485 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:09.314562 572647 cri.go:89] found id: ""
I1124 13:48:09.314592 572647 logs.go:282] 0 containers: []
W1124 13:48:09.314604 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:09.314611 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:09.314682 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:09.346903 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:09.346963 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:09.346970 572647 cri.go:89] found id: ""
I1124 13:48:09.346979 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:09.347049 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.351444 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.355601 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:09.355675 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:09.387667 572647 cri.go:89] found id: ""
I1124 13:48:09.387697 572647 logs.go:282] 0 containers: []
W1124 13:48:09.387709 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:09.387716 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:09.387779 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:09.417828 572647 cri.go:89] found id: ""
I1124 13:48:09.417854 572647 logs.go:282] 0 containers: []
W1124 13:48:09.417863 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:09.417876 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:09.417894 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:09.518663 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:09.518707 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:09.538049 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:09.538093 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:09.606209  572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:09.606232 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:09.606246 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:09.646703 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:09.646736 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:09.708037 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:09.708078 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:09.779698 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:09.779735 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:09.819613 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:09.819663 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:09.867349 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:09.867388 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:09.917580 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:09.917620 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:09.959751 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:09.959793 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:10.006236 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:10.006274 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:10.165110 608917 addons.go:530] duration metric: took 526.764143ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1124 13:48:10.444998 608917 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-608395" context rescaled to 1 replicas
W1124 13:48:11.948043 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
W1124 13:48:14.445721 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
W1124 13:48:11.417485 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
W1124 13:48:13.418201 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
I1124 13:48:12.563487 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:12.564031 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:12.564091 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:12.564151 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:12.598524 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:12.598553 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:12.598559 572647 cri.go:89] found id: ""
I1124 13:48:12.598570 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:12.598654 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.603466 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.608383 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:12.608462 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:12.652395 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:12.652422 572647 cri.go:89] found id: ""
I1124 13:48:12.652433 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:12.652503 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.657966 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:12.658060 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:12.693432 572647 cri.go:89] found id: ""
I1124 13:48:12.693468 572647 logs.go:282] 0 containers: []
W1124 13:48:12.693480 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:12.693489 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:12.693558 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:12.731546 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:12.731572 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:12.731579 572647 cri.go:89] found id: ""
I1124 13:48:12.731590 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:12.731820 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.737055 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.741859 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:12.741953 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:12.776627 572647 cri.go:89] found id: ""
I1124 13:48:12.776652 572647 logs.go:282] 0 containers: []
W1124 13:48:12.776660 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:12.776667 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:12.776735 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:12.809077 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:12.809099 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:12.809102 572647 cri.go:89] found id: ""
I1124 13:48:12.809112 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:12.809166 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.813963 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.818488 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:12.818563 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:12.852844 572647 cri.go:89] found id: ""
I1124 13:48:12.852879 572647 logs.go:282] 0 containers: []
W1124 13:48:12.852891 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:12.852900 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:12.853034 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:12.889177 572647 cri.go:89] found id: ""
I1124 13:48:12.889228 572647 logs.go:282] 0 containers: []
W1124 13:48:12.889240 572647 logs.go:284] No container was found matching "storage-provisioner"
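Each "listing CRI containers" / "found id" group above comes from one query, `crictl ps -a --quiet --name=<component>`, which prints one container ID per line and nothing when there is no match (hence the `0 containers` warnings for coredns, kube-proxy, kindnet, and storage-provisioner while the control plane is down). A hedged local sketch of that discovery step:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// listContainerIDs returns the IDs of all containers, in any state (-a),
// whose name matches the filter, one per line as `crictl ps --quiet` prints them.
func listContainerIDs(name string) ([]string, error) {
	out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet", "--name="+name).Output()
	if err != nil {
		return nil, err
	}
	return strings.Fields(string(out)), nil // empty output => no IDs
}

func main() {
	for _, component := range []string{"kube-apiserver", "etcd", "coredns"} {
		ids, err := listContainerIDs(component)
		if err != nil {
			fmt.Println(component, "query failed:", err)
			continue
		}
		fmt.Printf("%d containers for %q: %v\n", len(ids), component, ids)
	}
}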
I1124 13:48:12.889255 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:12.889278 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:12.941108 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:12.941146 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:13.012950 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:13.012998 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:13.059324 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:13.059367 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:13.096188 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:13.096235 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:13.157287 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:13.157338 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:13.198203 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:13.198250 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:13.219729 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:13.219773 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:13.293315 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:13.293338 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:13.293356 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:13.338975 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:13.339029 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:13.385546 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:13.385596 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:13.427130 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:13.427162 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:16.027717 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:16.028251 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:16.028310 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:16.028363 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:16.058811 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:16.058839 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:16.058847 572647 cri.go:89] found id: ""
I1124 13:48:16.058858 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:16.058999 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.063797 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.068208 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:16.068282 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:16.097374 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:16.097404 572647 cri.go:89] found id: ""
I1124 13:48:16.097416 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:16.097484 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.102967 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:16.103045 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:16.133626 572647 cri.go:89] found id: ""
I1124 13:48:16.133660 572647 logs.go:282] 0 containers: []
W1124 13:48:16.133670 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:16.133676 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:16.133746 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:16.165392 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:16.165424 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:16.165431 572647 cri.go:89] found id: ""
I1124 13:48:16.165442 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:16.165507 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.170277 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.174579 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:16.174661 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
W1124 13:48:16.445831 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
W1124 13:48:18.945868 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
W1124 13:48:15.917184 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
W1124 13:48:17.917526 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
I1124 13:48:19.416721 607669 node_ready.go:49] node "old-k8s-version-513442" is "Ready"
I1124 13:48:19.416760 607669 node_ready.go:38] duration metric: took 14.503103561s for node "old-k8s-version-513442" to be "Ready" ...
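The node_ready.go warnings and the final `is "Ready"` line correspond to polling the node object until its Ready condition turns True. A minimal client-go sketch of that predicate, assuming a clientset already built from the kubeconfig (construction omitted):

package readiness

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// isNodeReady reports whether the named node has condition Ready=True,
// the check behind the node_ready.go lines above.
func isNodeReady(ctx context.Context, c kubernetes.Interface, name string) (bool, error) {
	node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, cond := range node.Status.Conditions {
		if cond.Type == corev1.NodeReady {
			return cond.Status == corev1.ConditionTrue, nil
		}
	}
	return false, nil // no Ready condition reported yet
}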
I1124 13:48:19.416778 607669 api_server.go:52] waiting for apiserver process to appear ...
I1124 13:48:19.416833 607669 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 13:48:19.430267 607669 api_server.go:72] duration metric: took 14.90093273s to wait for apiserver process to appear ...
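Before probing healthz, the wait above first confirms that an apiserver process exists at all, using pgrep against the full command line. A local sketch of that check (pgrep exits non-zero when nothing matches, which exec surfaces as an error):

package main

import (
	"fmt"
	"os/exec"
)

// apiserverProcessRunning reports whether pgrep finds a process whose
// full command line matches the pattern, as in api_server.go above.
func apiserverProcessRunning() bool {
	err := exec.Command("sudo", "pgrep", "-xnf", "kube-apiserver.*minikube.*").Run()
	return err == nil
}

func main() {
	fmt.Println("apiserver process running:", apiserverProcessRunning())
}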
I1124 13:48:19.430299 607669 api_server.go:88] waiting for apiserver healthz status ...
I1124 13:48:19.430326 607669 api_server.go:253] Checking apiserver healthz at https://192.168.94.2:8443/healthz ...
I1124 13:48:19.436844 607669 api_server.go:279] https://192.168.94.2:8443/healthz returned 200:
ok
I1124 13:48:19.438582 607669 api_server.go:141] control plane version: v1.28.0
I1124 13:48:19.438618 607669 api_server.go:131] duration metric: took 8.311152ms to wait for apiserver health ...
I1124 13:48:19.438632 607669 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 13:48:19.443134 607669 system_pods.go:59] 8 kube-system pods found
I1124 13:48:19.443191 607669 system_pods.go:61] "coredns-5dd5756b68-b5rrl" [4e6c9b7c-5f0a-4c60-8197-20e985a07403] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:19.443200 607669 system_pods.go:61] "etcd-old-k8s-version-513442" [0b1a1913-a17b-4362-af66-49436a831759] Running
I1124 13:48:19.443207 607669 system_pods.go:61] "kindnet-tpjvb" [c7df115a-8394-4f80-ac6c-5b1fc95337b5] Running
I1124 13:48:19.443213 607669 system_pods.go:61] "kube-apiserver-old-k8s-version-513442" [722a96a1-58fb-4240-9c3b-4732b2fc0877] Running
I1124 13:48:19.443219 607669 system_pods.go:61] "kube-controller-manager-old-k8s-version-513442" [df7953a7-c9cf-4854-b6bb-c43b0415e709] Running
I1124 13:48:19.443225 607669 system_pods.go:61] "kube-proxy-hzfcx" [f4ba208a-1a78-46ae-9684-ff3309400852] Running
I1124 13:48:19.443231 607669 system_pods.go:61] "kube-scheduler-old-k8s-version-513442" [c400bc97-a209-437d-ba96-60c58a4b8878] Running
I1124 13:48:19.443240 607669 system_pods.go:61] "storage-provisioner" [65efb270-100a-4e7c-bee8-24de1df28586] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:19.443248 607669 system_pods.go:74] duration metric: took 4.608559ms to wait for pod list to return data ...
I1124 13:48:19.443260 607669 default_sa.go:34] waiting for default service account to be created ...
I1124 13:48:19.446125 607669 default_sa.go:45] found service account: "default"
I1124 13:48:19.446157 607669 default_sa.go:55] duration metric: took 2.890045ms for default service account to be created ...
I1124 13:48:19.446170 607669 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 13:48:19.450324 607669 system_pods.go:86] 8 kube-system pods found
I1124 13:48:19.450375 607669 system_pods.go:89] "coredns-5dd5756b68-b5rrl" [4e6c9b7c-5f0a-4c60-8197-20e985a07403] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:19.450385 607669 system_pods.go:89] "etcd-old-k8s-version-513442" [0b1a1913-a17b-4362-af66-49436a831759] Running
I1124 13:48:19.450394 607669 system_pods.go:89] "kindnet-tpjvb" [c7df115a-8394-4f80-ac6c-5b1fc95337b5] Running
I1124 13:48:19.450408 607669 system_pods.go:89] "kube-apiserver-old-k8s-version-513442" [722a96a1-58fb-4240-9c3b-4732b2fc0877] Running
I1124 13:48:19.450415 607669 system_pods.go:89] "kube-controller-manager-old-k8s-version-513442" [df7953a7-c9cf-4854-b6bb-c43b0415e709] Running
I1124 13:48:19.450425 607669 system_pods.go:89] "kube-proxy-hzfcx" [f4ba208a-1a78-46ae-9684-ff3309400852] Running
I1124 13:48:19.450434 607669 system_pods.go:89] "kube-scheduler-old-k8s-version-513442" [c400bc97-a209-437d-ba96-60c58a4b8878] Running
I1124 13:48:19.450449 607669 system_pods.go:89] "storage-provisioner" [65efb270-100a-4e7c-bee8-24de1df28586] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:19.450484 607669 retry.go:31] will retry after 306.547577ms: missing components: kube-dns
I1124 13:48:19.761785 607669 system_pods.go:86] 8 kube-system pods found
I1124 13:48:19.761821 607669 system_pods.go:89] "coredns-5dd5756b68-b5rrl" [4e6c9b7c-5f0a-4c60-8197-20e985a07403] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:19.761828 607669 system_pods.go:89] "etcd-old-k8s-version-513442" [0b1a1913-a17b-4362-af66-49436a831759] Running
I1124 13:48:19.761835 607669 system_pods.go:89] "kindnet-tpjvb" [c7df115a-8394-4f80-ac6c-5b1fc95337b5] Running
I1124 13:48:19.761839 607669 system_pods.go:89] "kube-apiserver-old-k8s-version-513442" [722a96a1-58fb-4240-9c3b-4732b2fc0877] Running
I1124 13:48:19.761843 607669 system_pods.go:89] "kube-controller-manager-old-k8s-version-513442" [df7953a7-c9cf-4854-b6bb-c43b0415e709] Running
I1124 13:48:19.761846 607669 system_pods.go:89] "kube-proxy-hzfcx" [f4ba208a-1a78-46ae-9684-ff3309400852] Running
I1124 13:48:19.761850 607669 system_pods.go:89] "kube-scheduler-old-k8s-version-513442" [c400bc97-a209-437d-ba96-60c58a4b8878] Running
I1124 13:48:19.761855 607669 system_pods.go:89] "storage-provisioner" [65efb270-100a-4e7c-bee8-24de1df28586] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:19.761871 607669 retry.go:31] will retry after 263.639636ms: missing components: kube-dns
I1124 13:48:20.030723 607669 system_pods.go:86] 8 kube-system pods found
I1124 13:48:20.030764 607669 system_pods.go:89] "coredns-5dd5756b68-b5rrl" [4e6c9b7c-5f0a-4c60-8197-20e985a07403] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:20.030773 607669 system_pods.go:89] "etcd-old-k8s-version-513442" [0b1a1913-a17b-4362-af66-49436a831759] Running
I1124 13:48:20.030781 607669 system_pods.go:89] "kindnet-tpjvb" [c7df115a-8394-4f80-ac6c-5b1fc95337b5] Running
I1124 13:48:20.030787 607669 system_pods.go:89] "kube-apiserver-old-k8s-version-513442" [722a96a1-58fb-4240-9c3b-4732b2fc0877] Running
I1124 13:48:20.030794 607669 system_pods.go:89] "kube-controller-manager-old-k8s-version-513442" [df7953a7-c9cf-4854-b6bb-c43b0415e709] Running
I1124 13:48:20.030799 607669 system_pods.go:89] "kube-proxy-hzfcx" [f4ba208a-1a78-46ae-9684-ff3309400852] Running
I1124 13:48:20.030804 607669 system_pods.go:89] "kube-scheduler-old-k8s-version-513442" [c400bc97-a209-437d-ba96-60c58a4b8878] Running
I1124 13:48:20.030812 607669 system_pods.go:89] "storage-provisioner" [65efb270-100a-4e7c-bee8-24de1df28586] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:20.030836 607669 retry.go:31] will retry after 485.23875ms: missing components: kube-dns
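The three retry.go lines above ("will retry after 306ms / 263ms / 485ms: missing components: kube-dns") show a jittered retry loop that relists kube-system pods until the DNS pod leaves Pending. A sketch of that loop under the same client-go assumptions as before, with an illustrative jitter range (the real backoff parameters are not visible in the log):

package readiness

import (
	"context"
	"fmt"
	"math/rand"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForKubeDNS polls kube-system until every coredns pod is Running,
// sleeping a short randomized interval between attempts.
func waitForKubeDNS(ctx context.Context, c kubernetes.Interface) error {
	for {
		pods, err := c.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{
			LabelSelector: "k8s-app=kube-dns", // label carried by coredns pods
		})
		if err != nil {
			return err
		}
		running := len(pods.Items) > 0
		for _, p := range pods.Items {
			if p.Status.Phase != corev1.PodRunning {
				running = false
			}
		}
		if running {
			return nil
		}
		delay := time.Duration(250+rand.Intn(300)) * time.Millisecond // assumed jitter
		fmt.Printf("will retry after %v: missing components: kube-dns\n", delay)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(delay):
		}
	}
}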
I1124 13:48:16.203971 572647 cri.go:89] found id: ""
I1124 13:48:16.204004 572647 logs.go:282] 0 containers: []
W1124 13:48:16.204016 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:16.204025 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:16.204087 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:16.233087 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:16.233113 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:16.233119 572647 cri.go:89] found id: ""
I1124 13:48:16.233130 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:16.233184 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.237937 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.242366 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:16.242450 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:16.273007 572647 cri.go:89] found id: ""
I1124 13:48:16.273034 572647 logs.go:282] 0 containers: []
W1124 13:48:16.273043 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:16.273049 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:16.273100 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:16.302483 572647 cri.go:89] found id: ""
I1124 13:48:16.302518 572647 logs.go:282] 0 containers: []
W1124 13:48:16.302537 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:16.302553 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:16.302575 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:16.360777 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:16.360817 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:16.391672 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:16.391700 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:16.490704 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:16.490743 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:16.530411 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:16.530448 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:16.567070 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:16.567107 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:16.601689 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:16.601728 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:16.646105 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:16.646143 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:16.682522 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:16.682560 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:16.699850 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:16.699887 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:16.759811 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:16.759835 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:16.759853 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:16.795013 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:16.795048 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:19.334057 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:19.334568 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:19.334661 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:19.334733 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:19.365714 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:19.365735 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:19.365739 572647 cri.go:89] found id: ""
I1124 13:48:19.365747 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:19.365800 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.370354 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.374856 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:19.374992 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:19.405492 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:19.405519 572647 cri.go:89] found id: ""
I1124 13:48:19.405529 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:19.405589 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.411364 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:19.411426 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:19.443360 572647 cri.go:89] found id: ""
I1124 13:48:19.443391 572647 logs.go:282] 0 containers: []
W1124 13:48:19.443404 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:19.443412 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:19.443477 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:19.475298 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:19.475324 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:19.475331 572647 cri.go:89] found id: ""
I1124 13:48:19.475341 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:19.475407 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.480369 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.484782 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:19.484863 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:19.514622 572647 cri.go:89] found id: ""
I1124 13:48:19.514666 572647 logs.go:282] 0 containers: []
W1124 13:48:19.514716 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:19.514726 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:19.514807 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:19.550847 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:19.550872 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:19.550877 572647 cri.go:89] found id: ""
I1124 13:48:19.550886 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:19.550963 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.556478 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.561320 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:19.561401 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:19.596190 572647 cri.go:89] found id: ""
I1124 13:48:19.596226 572647 logs.go:282] 0 containers: []
W1124 13:48:19.596238 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:19.596247 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:19.596309 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:19.627382 572647 cri.go:89] found id: ""
I1124 13:48:19.627413 572647 logs.go:282] 0 containers: []
W1124 13:48:19.627424 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:19.627436 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:19.627452 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:19.694796 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:19.694836 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:19.752858 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:19.752896 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:19.788182 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:19.788224 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:19.879216 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:19.879255 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:19.940757 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:19.940776 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:19.940790 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:19.979681 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:19.979726 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:20.020042 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:20.020085 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:20.064463 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:20.064499 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:20.098012 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:20.098044 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:20.132122 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:20.132157 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:20.148958 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:20.148997 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:20.521094 607669 system_pods.go:86] 8 kube-system pods found
I1124 13:48:20.521123 607669 system_pods.go:89] "coredns-5dd5756b68-b5rrl" [4e6c9b7c-5f0a-4c60-8197-20e985a07403] Running
I1124 13:48:20.521130 607669 system_pods.go:89] "etcd-old-k8s-version-513442" [0b1a1913-a17b-4362-af66-49436a831759] Running
I1124 13:48:20.521133 607669 system_pods.go:89] "kindnet-tpjvb" [c7df115a-8394-4f80-ac6c-5b1fc95337b5] Running
I1124 13:48:20.521137 607669 system_pods.go:89] "kube-apiserver-old-k8s-version-513442" [722a96a1-58fb-4240-9c3b-4732b2fc0877] Running
I1124 13:48:20.521141 607669 system_pods.go:89] "kube-controller-manager-old-k8s-version-513442" [df7953a7-c9cf-4854-b6bb-c43b0415e709] Running
I1124 13:48:20.521145 607669 system_pods.go:89] "kube-proxy-hzfcx" [f4ba208a-1a78-46ae-9684-ff3309400852] Running
I1124 13:48:20.521148 607669 system_pods.go:89] "kube-scheduler-old-k8s-version-513442" [c400bc97-a209-437d-ba96-60c58a4b8878] Running
I1124 13:48:20.521151 607669 system_pods.go:89] "storage-provisioner" [65efb270-100a-4e7c-bee8-24de1df28586] Running
I1124 13:48:20.521159 607669 system_pods.go:126] duration metric: took 1.074982882s to wait for k8s-apps to be running ...
I1124 13:48:20.521166 607669 system_svc.go:44] waiting for kubelet service to be running ....
I1124 13:48:20.521215 607669 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 13:48:20.535666 607669 system_svc.go:56] duration metric: took 14.486184ms WaitForService to wait for kubelet
I1124 13:48:20.535706 607669 kubeadm.go:587] duration metric: took 16.006375183s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 13:48:20.535732 607669 node_conditions.go:102] verifying NodePressure condition ...
I1124 13:48:20.538619 607669 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1124 13:48:20.538646 607669 node_conditions.go:123] node cpu capacity is 8
I1124 13:48:20.538662 607669 node_conditions.go:105] duration metric: took 2.9245ms to run NodePressure ...
I1124 13:48:20.538676 607669 start.go:242] waiting for startup goroutines ...
I1124 13:48:20.538683 607669 start.go:247] waiting for cluster config update ...
I1124 13:48:20.538693 607669 start.go:256] writing updated cluster config ...
I1124 13:48:20.539040 607669 ssh_runner.go:195] Run: rm -f paused
I1124 13:48:20.543325 607669 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 13:48:20.547793 607669 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-b5rrl" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.552447 607669 pod_ready.go:94] pod "coredns-5dd5756b68-b5rrl" is "Ready"
I1124 13:48:20.552472 607669 pod_ready.go:86] duration metric: took 4.651627ms for pod "coredns-5dd5756b68-b5rrl" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.556328 607669 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.561689 607669 pod_ready.go:94] pod "etcd-old-k8s-version-513442" is "Ready"
I1124 13:48:20.561717 607669 pod_ready.go:86] duration metric: took 5.363766ms for pod "etcd-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.564634 607669 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.569265 607669 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-513442" is "Ready"
I1124 13:48:20.569291 607669 pod_ready.go:86] duration metric: took 4.631558ms for pod "kube-apiserver-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.572304 607669 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.948397 607669 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-513442" is "Ready"
I1124 13:48:20.948423 607669 pod_ready.go:86] duration metric: took 376.095956ms for pod "kube-controller-manager-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:21.148648 607669 pod_ready.go:83] waiting for pod "kube-proxy-hzfcx" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:21.548255 607669 pod_ready.go:94] pod "kube-proxy-hzfcx" is "Ready"
I1124 13:48:21.548288 607669 pod_ready.go:86] duration metric: took 399.608636ms for pod "kube-proxy-hzfcx" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:21.748744 607669 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:22.147789 607669 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-513442" is "Ready"
I1124 13:48:22.147821 607669 pod_ready.go:86] duration metric: took 399.0528ms for pod "kube-scheduler-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:22.147833 607669 pod_ready.go:40] duration metric: took 1.604464617s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
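Unlike the system_pods wait, the pod_ready.go pass above checks each pod's Ready condition rather than its phase, which is why a Running-but-unready coredns would still block it. A small helper expressing that predicate, reusing the corev1 types from the earlier sketches:

package readiness

import corev1 "k8s.io/api/core/v1"

// isPodReady: the pod's Ready condition must be True,
// not merely phase Running.
func isPodReady(pod *corev1.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReady {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}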
I1124 13:48:22.193883 607669 start.go:625] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1124 13:48:22.196207 607669 out.go:203]
W1124 13:48:22.197964 607669 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1124 13:48:22.199516 607669 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1124 13:48:22.201541 607669 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-513442" cluster and "default" namespace by default
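The closing warning compares the host kubectl (1.34.2) against the cluster (1.28.0) and reports a minor-version skew of 6, well beyond the single minor version of skew kubectl officially supports. A sketch of that comparison, assuming plain "major.minor.patch" inputs:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// minorSkew returns the absolute difference of the minor components of
// two "major.minor.patch" version strings, e.g. ("1.34.2", "1.28.0") -> 6.
func minorSkew(a, b string) (int, error) {
	minor := func(v string) (int, error) {
		parts := strings.Split(v, ".")
		if len(parts) < 2 {
			return 0, fmt.Errorf("bad version %q", v)
		}
		return strconv.Atoi(parts[1])
	}
	ma, err := minor(a)
	if err != nil {
		return 0, err
	}
	mb, err := minor(b)
	if err != nil {
		return 0, err
	}
	if ma < mb {
		ma, mb = mb, ma
	}
	return ma - mb, nil
}

func main() {
	skew, _ := minorSkew("1.34.2", "1.28.0")
	fmt.Printf("minor skew: %d\n", skew) // prints 6, matching the log
}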
W1124 13:48:20.947014 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
W1124 13:48:22.948554 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
I1124 13:48:24.446130 608917 node_ready.go:49] node "no-preload-608395" is "Ready"
I1124 13:48:24.446168 608917 node_ready.go:38] duration metric: took 14.503611427s for node "no-preload-608395" to be "Ready" ...
I1124 13:48:24.446195 608917 api_server.go:52] waiting for apiserver process to appear ...
I1124 13:48:24.446254 608917 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 13:48:24.460952 608917 api_server.go:72] duration metric: took 14.82264088s to wait for apiserver process to appear ...
I1124 13:48:24.460990 608917 api_server.go:88] waiting for apiserver healthz status ...
I1124 13:48:24.461021 608917 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1124 13:48:24.466768 608917 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1124 13:48:24.468117 608917 api_server.go:141] control plane version: v1.34.1
I1124 13:48:24.468151 608917 api_server.go:131] duration metric: took 7.151862ms to wait for apiserver health ...
I1124 13:48:24.468164 608917 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 13:48:24.473836 608917 system_pods.go:59] 8 kube-system pods found
I1124 13:48:24.473891 608917 system_pods.go:61] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:24.473901 608917 system_pods.go:61] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:24.473965 608917 system_pods.go:61] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:24.473980 608917 system_pods.go:61] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:24.473987 608917 system_pods.go:61] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:24.473995 608917 system_pods.go:61] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:24.474001 608917 system_pods.go:61] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:24.474011 608917 system_pods.go:61] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:24.474025 608917 system_pods.go:74] duration metric: took 5.853076ms to wait for pod list to return data ...
I1124 13:48:24.474037 608917 default_sa.go:34] waiting for default service account to be created ...
I1124 13:48:24.476681 608917 default_sa.go:45] found service account: "default"
I1124 13:48:24.476712 608917 default_sa.go:55] duration metric: took 2.661232ms for default service account to be created ...
I1124 13:48:24.476724 608917 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 13:48:24.479715 608917 system_pods.go:86] 8 kube-system pods found
I1124 13:48:24.479757 608917 system_pods.go:89] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:24.479765 608917 system_pods.go:89] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:24.479776 608917 system_pods.go:89] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:24.479782 608917 system_pods.go:89] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:24.479788 608917 system_pods.go:89] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:24.479793 608917 system_pods.go:89] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:24.479798 608917 system_pods.go:89] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:24.479806 608917 system_pods.go:89] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:24.479831 608917 retry.go:31] will retry after 257.034103ms: missing components: kube-dns
I1124 13:48:24.740811 608917 system_pods.go:86] 8 kube-system pods found
I1124 13:48:24.740842 608917 system_pods.go:89] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:24.740848 608917 system_pods.go:89] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:24.740854 608917 system_pods.go:89] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:24.740858 608917 system_pods.go:89] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:24.740863 608917 system_pods.go:89] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:24.740866 608917 system_pods.go:89] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:24.740869 608917 system_pods.go:89] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:24.740876 608917 system_pods.go:89] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:24.740892 608917 retry.go:31] will retry after 244.335921ms: missing components: kube-dns
I1124 13:48:24.989021 608917 system_pods.go:86] 8 kube-system pods found
I1124 13:48:24.989054 608917 system_pods.go:89] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:24.989061 608917 system_pods.go:89] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:24.989067 608917 system_pods.go:89] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:24.989072 608917 system_pods.go:89] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:24.989077 608917 system_pods.go:89] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:24.989080 608917 system_pods.go:89] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:24.989084 608917 system_pods.go:89] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:24.989089 608917 system_pods.go:89] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:24.989104 608917 retry.go:31] will retry after 431.238044ms: missing components: kube-dns
I1124 13:48:22.686011 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:22.686450 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:22.686506 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:22.686563 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:22.718842 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:22.718868 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:22.718874 572647 cri.go:89] found id: ""
I1124 13:48:22.718885 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:22.719025 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.724051 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.728627 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:22.728697 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:22.758279 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:22.758305 572647 cri.go:89] found id: ""
I1124 13:48:22.758315 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:22.758378 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.762905 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:22.763025 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:22.796176 572647 cri.go:89] found id: ""
I1124 13:48:22.796207 572647 logs.go:282] 0 containers: []
W1124 13:48:22.796218 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:22.796227 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:22.796293 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:22.828770 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:22.828801 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:22.828815 572647 cri.go:89] found id: ""
I1124 13:48:22.828827 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:22.828886 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.833530 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.837668 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:22.837750 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:22.867760 572647 cri.go:89] found id: ""
I1124 13:48:22.867793 572647 logs.go:282] 0 containers: []
W1124 13:48:22.867806 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:22.867815 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:22.867976 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:22.899275 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:22.899305 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:22.899312 572647 cri.go:89] found id: ""
I1124 13:48:22.899327 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:22.899391 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.903859 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.908121 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:22.908190 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:22.938883 572647 cri.go:89] found id: ""
I1124 13:48:22.938961 572647 logs.go:282] 0 containers: []
W1124 13:48:22.938972 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:22.938980 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:22.939033 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:22.969840 572647 cri.go:89] found id: ""
I1124 13:48:22.969864 572647 logs.go:282] 0 containers: []
W1124 13:48:22.969872 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:22.969882 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:22.969903 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:23.031386 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:23.031411 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:23.031425 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:23.067770 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:23.067805 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:23.104851 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:23.104886 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:23.160621 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:23.160668 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:23.190994 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:23.191026 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:23.226509 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:23.226542 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:23.269082 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:23.269130 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:23.360572 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:23.360613 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:23.399049 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:23.399089 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:23.440241 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:23.440282 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:23.474172 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:23.474212 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:25.992569 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:25.993167 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:25.993241 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:25.993310 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:26.021789 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:26.021816 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:26.021823 572647 cri.go:89] found id: ""
I1124 13:48:26.021834 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:26.021985 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.027084 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.031267 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:26.031350 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:26.063349 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:26.063379 572647 cri.go:89] found id: ""
I1124 13:48:26.063390 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:26.063448 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.068064 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:26.068140 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:26.096106 572647 cri.go:89] found id: ""
I1124 13:48:26.096148 572647 logs.go:282] 0 containers: []
W1124 13:48:26.096158 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:26.096165 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:26.096220 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:26.126156 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:26.126186 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:26.126193 572647 cri.go:89] found id: ""
I1124 13:48:26.126205 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:26.126275 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.131369 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.135595 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:26.135657 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:26.163133 572647 cri.go:89] found id: ""
I1124 13:48:26.163161 572647 logs.go:282] 0 containers: []
W1124 13:48:26.163169 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:26.163187 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:26.163244 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:26.192355 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:26.192378 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:26.192384 572647 cri.go:89] found id: ""
I1124 13:48:26.192394 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:26.192549 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.197316 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:25.424597 608917 system_pods.go:86] 8 kube-system pods found
I1124 13:48:25.424631 608917 system_pods.go:89] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:25.424636 608917 system_pods.go:89] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:25.424642 608917 system_pods.go:89] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:25.424646 608917 system_pods.go:89] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:25.424650 608917 system_pods.go:89] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:25.424653 608917 system_pods.go:89] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:25.424656 608917 system_pods.go:89] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:25.424663 608917 system_pods.go:89] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:25.424679 608917 retry.go:31] will retry after 458.014987ms: missing components: kube-dns
I1124 13:48:25.886603 608917 system_pods.go:86] 8 kube-system pods found
I1124 13:48:25.886633 608917 system_pods.go:89] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Running
I1124 13:48:25.886641 608917 system_pods.go:89] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:25.886644 608917 system_pods.go:89] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:25.886649 608917 system_pods.go:89] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:25.886653 608917 system_pods.go:89] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:25.886657 608917 system_pods.go:89] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:25.886660 608917 system_pods.go:89] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:25.886663 608917 system_pods.go:89] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Running
I1124 13:48:25.886671 608917 system_pods.go:126] duration metric: took 1.409940532s to wait for k8s-apps to be running ...
I1124 13:48:25.886680 608917 system_svc.go:44] waiting for kubelet service to be running ....
I1124 13:48:25.886726 608917 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 13:48:25.901294 608917 system_svc.go:56] duration metric: took 14.604723ms WaitForService to wait for kubelet
I1124 13:48:25.901324 608917 kubeadm.go:587] duration metric: took 16.26302303s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 13:48:25.901343 608917 node_conditions.go:102] verifying NodePressure condition ...
I1124 13:48:25.904190 608917 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1124 13:48:25.904219 608917 node_conditions.go:123] node cpu capacity is 8
I1124 13:48:25.904234 608917 node_conditions.go:105] duration metric: took 2.88688ms to run NodePressure ...
I1124 13:48:25.904249 608917 start.go:242] waiting for startup goroutines ...
I1124 13:48:25.904256 608917 start.go:247] waiting for cluster config update ...
I1124 13:48:25.904266 608917 start.go:256] writing updated cluster config ...
I1124 13:48:25.904560 608917 ssh_runner.go:195] Run: rm -f paused
I1124 13:48:25.909215 608917 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 13:48:25.912986 608917 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-rcf8v" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.917301 608917 pod_ready.go:94] pod "coredns-66bc5c9577-rcf8v" is "Ready"
I1124 13:48:25.917324 608917 pod_ready.go:86] duration metric: took 4.297309ms for pod "coredns-66bc5c9577-rcf8v" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.919442 608917 pod_ready.go:83] waiting for pod "etcd-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.923976 608917 pod_ready.go:94] pod "etcd-no-preload-608395" is "Ready"
I1124 13:48:25.923999 608917 pod_ready.go:86] duration metric: took 4.535115ms for pod "etcd-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.926003 608917 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.930385 608917 pod_ready.go:94] pod "kube-apiserver-no-preload-608395" is "Ready"
I1124 13:48:25.930413 608917 pod_ready.go:86] duration metric: took 4.382406ms for pod "kube-apiserver-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.932261 608917 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:26.313581 608917 pod_ready.go:94] pod "kube-controller-manager-no-preload-608395" is "Ready"
I1124 13:48:26.313615 608917 pod_ready.go:86] duration metric: took 381.333887ms for pod "kube-controller-manager-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:26.514064 608917 pod_ready.go:83] waiting for pod "kube-proxy-5vj5p" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:26.913664 608917 pod_ready.go:94] pod "kube-proxy-5vj5p" is "Ready"
I1124 13:48:26.913702 608917 pod_ready.go:86] duration metric: took 399.60223ms for pod "kube-proxy-5vj5p" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:27.114488 608917 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:27.514056 608917 pod_ready.go:94] pod "kube-scheduler-no-preload-608395" is "Ready"
I1124 13:48:27.514084 608917 pod_ready.go:86] duration metric: took 399.56934ms for pod "kube-scheduler-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:27.514098 608917 pod_ready.go:40] duration metric: took 1.604847792s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 13:48:27.561310 608917 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1124 13:48:27.563544 608917 out.go:179] * Done! kubectl is now configured to use "no-preload-608395" cluster and "default" namespace by default
I1124 13:48:26.202352 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:26.202439 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:26.231899 572647 cri.go:89] found id: ""
I1124 13:48:26.231953 572647 logs.go:282] 0 containers: []
W1124 13:48:26.231964 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:26.231973 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:26.232040 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:26.263417 572647 cri.go:89] found id: ""
I1124 13:48:26.263446 572647 logs.go:282] 0 containers: []
W1124 13:48:26.263459 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:26.263473 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:26.263488 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:26.354230 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:26.354265 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:26.389608 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:26.389652 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:26.427040 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:26.427077 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:26.466568 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:26.466603 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:26.503710 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:26.503749 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:26.539150 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:26.539193 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:26.583782 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:26.583825 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:26.617656 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:26.617696 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:26.634777 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:26.634809 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:26.693534 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:26.693559 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:26.693577 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:26.748627 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:26.748668 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:29.280171 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:29.280640 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:29.280694 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:29.280748 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:29.309613 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:29.309638 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:29.309644 572647 cri.go:89] found id: ""
I1124 13:48:29.309660 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:29.309730 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.314623 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.319864 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:29.319962 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:29.348671 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:29.348699 572647 cri.go:89] found id: ""
I1124 13:48:29.348709 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:29.348775 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.353662 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:29.353728 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:29.383017 572647 cri.go:89] found id: ""
I1124 13:48:29.383046 572647 logs.go:282] 0 containers: []
W1124 13:48:29.383058 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:29.383066 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:29.383121 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:29.411238 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:29.411259 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:29.411264 572647 cri.go:89] found id: ""
I1124 13:48:29.411271 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:29.411325 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.415976 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.420189 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:29.420264 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:29.449856 572647 cri.go:89] found id: ""
I1124 13:48:29.449890 572647 logs.go:282] 0 containers: []
W1124 13:48:29.449921 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:29.449929 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:29.450001 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:29.480136 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:29.480164 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:29.480171 572647 cri.go:89] found id: ""
I1124 13:48:29.480181 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:29.480258 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.484998 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.489433 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:29.489504 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:29.519804 572647 cri.go:89] found id: ""
I1124 13:48:29.519841 572647 logs.go:282] 0 containers: []
W1124 13:48:29.519854 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:29.519864 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:29.520048 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:29.549935 572647 cri.go:89] found id: ""
I1124 13:48:29.549964 572647 logs.go:282] 0 containers: []
W1124 13:48:29.549974 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:29.549986 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:29.549997 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:29.593521 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:29.593560 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:29.681751 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:29.681792 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:29.699198 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:29.699232 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:29.759823 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:29.759850 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:29.759863 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:29.798497 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:29.798534 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:29.835677 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:29.835718 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:29.864876 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:29.864923 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:29.898153 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:29.898186 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:29.932035 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:29.932073 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:29.971224 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:29.971258 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:30.026576 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:30.026619 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
==> container status <==
CONTAINER       IMAGE           CREATED          STATE     NAME                      ATTEMPT   POD ID          POD                                               NAMESPACE
b44a9a38266a3   56cc512116c8f   8 seconds ago    Running   busybox                   0         91e7e42c593d0   busybox                                           default
8d4a4dd9d6632   ead0a4a53df89   13 seconds ago   Running   coredns                   0         1c930bc4d6523   coredns-5dd5756b68-b5rrl                          kube-system
c9c8f51adb6bb   6e38f40d628db   13 seconds ago   Running   storage-provisioner       0         840fae773d68e   storage-provisioner                               kube-system
1dab1df16e654   409467f978b4a   25 seconds ago   Running   kindnet-cni               0         30a65fd13bcca   kindnet-tpjvb                                     kube-system
0b87cfcc163e3   ea1030da44aa1   28 seconds ago   Running   kube-proxy                0         555af9e11f935   kube-proxy-hzfcx                                  kube-system
b89c098ff2cb6   bb5e0dde9054c   46 seconds ago   Running   kube-apiserver            0         b832e9f75c0f1   kube-apiserver-old-k8s-version-513442             kube-system
f7663d3953f0e   4be79c38a4bab   46 seconds ago   Running   kube-controller-manager   0         06bb689695cce   kube-controller-manager-old-k8s-version-513442    kube-system
bdd5c20173350   f6f496300a2ae   46 seconds ago   Running   kube-scheduler            0         ac1efcdb81d0e   kube-scheduler-old-k8s-version-513442             kube-system
5793c7fd11b5c   73deb9a3f7025   46 seconds ago   Running   etcd                      0         3c4129b98c0d7   etcd-old-k8s-version-513442                       kube-system
==> containerd <==
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.636050137Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-5dd5756b68-b5rrl,Uid:4e6c9b7c-5f0a-4c60-8197-20e985a07403,Namespace:kube-system,Attempt:0,} returns sandbox id \"1c930bc4d6523dcc2ff99c9243131fcf23dfc7881b09c013bf55e68b23ecf25e\""
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.639799945Z" level=info msg="CreateContainer within sandbox \"1c930bc4d6523dcc2ff99c9243131fcf23dfc7881b09c013bf55e68b23ecf25e\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.648881001Z" level=info msg="Container 8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89: CDI devices from CRI Config.CDIDevices: []"
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.657829357Z" level=info msg="CreateContainer within sandbox \"1c930bc4d6523dcc2ff99c9243131fcf23dfc7881b09c013bf55e68b23ecf25e\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89\""
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.658662420Z" level=info msg="StartContainer for \"8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89\""
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.659800869Z" level=info msg="connecting to shim 8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89" address="unix:///run/containerd/s/c69a9b00491bdefff20b5fba21aa1d556fb9c3a3bad974c8b8be870ca95e072b" protocol=ttrpc version=3
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.704634320Z" level=info msg="StartContainer for \"c9c8f51adb6bbca8e0f954ad9082c0c66235dce129e152dd682ab69622b44aac\" returns successfully"
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.716701551Z" level=info msg="StartContainer for \"8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89\" returns successfully"
Nov 24 13:48:22 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:22.659740340Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:e21ee73b-578f-48c9-826d-ab3b4bbb7871,Namespace:default,Attempt:0,}"
Nov 24 13:48:22 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:22.705643585Z" level=info msg="connecting to shim 91e7e42c593d0f49381ba051fa95a3bffc3c2fedf4ee572f1ee3e65a03cebfff" address="unix:///run/containerd/s/a6973921fa6bbb987fab0736637648be3dc3e077c5046184370bd0c127ef00c4" namespace=k8s.io protocol=ttrpc version=3
Nov 24 13:48:22 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:22.781316455Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:e21ee73b-578f-48c9-826d-ab3b4bbb7871,Namespace:default,Attempt:0,} returns sandbox id \"91e7e42c593d0f49381ba051fa95a3bffc3c2fedf4ee572f1ee3e65a03cebfff\""
Nov 24 13:48:22 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:22.783364521Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.550927147Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.551949670Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396647"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.553332639Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.555518804Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.555999909Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.772594905s"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.556037581Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.557958127Z" level=info msg="CreateContainer within sandbox \"91e7e42c593d0f49381ba051fa95a3bffc3c2fedf4ee572f1ee3e65a03cebfff\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.566156418Z" level=info msg="Container b44a9a38266a36367dda4e29d517101d0bad25018140ed3049b32babe692f605: CDI devices from CRI Config.CDIDevices: []"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.572811164Z" level=info msg="CreateContainer within sandbox \"91e7e42c593d0f49381ba051fa95a3bffc3c2fedf4ee572f1ee3e65a03cebfff\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"b44a9a38266a36367dda4e29d517101d0bad25018140ed3049b32babe692f605\""
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.573543998Z" level=info msg="StartContainer for \"b44a9a38266a36367dda4e29d517101d0bad25018140ed3049b32babe692f605\""
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.574401159Z" level=info msg="connecting to shim b44a9a38266a36367dda4e29d517101d0bad25018140ed3049b32babe692f605" address="unix:///run/containerd/s/a6973921fa6bbb987fab0736637648be3dc3e077c5046184370bd0c127ef00c4" protocol=ttrpc version=3
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.628848926Z" level=info msg="StartContainer for \"b44a9a38266a36367dda4e29d517101d0bad25018140ed3049b32babe692f605\" returns successfully"
Nov 24 13:48:32 old-k8s-version-513442 containerd[663]: E1124 13:48:32.433506 663 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 4c7f44b73086be760ec9e64204f63c5cc5a952c8c1c55ba0b41d8fc3315ce3c7d0259d04847cb8b4561043d4549603f3bccfd9b397eeb814eef159d244d26f39
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:57003 - 26434 "HINFO IN 1735205229727733014.6660763770011463869. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.021751094s
==> describe nodes <==
Name: old-k8s-version-513442
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-513442
kubernetes.io/os=linux
minikube.k8s.io/commit=b5d1c9f4e75f4e638a533695fd62619949cefcab
minikube.k8s.io/name=old-k8s-version-513442
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_24T13_47_52_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 24 Nov 2025 13:47:48 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-513442
AcquireTime: <unset>
RenewTime: Mon, 24 Nov 2025 13:48:32 +0000
Conditions:
Type             Status   LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------   -----------------                 ------------------                ------                       -------
MemoryPressure   False    Mon, 24 Nov 2025 13:48:22 +0000   Mon, 24 Nov 2025 13:47:47 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False    Mon, 24 Nov 2025 13:48:22 +0000   Mon, 24 Nov 2025 13:47:47 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False    Mon, 24 Nov 2025 13:48:22 +0000   Mon, 24 Nov 2025 13:47:47 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True     Mon, 24 Nov 2025 13:48:22 +0000   Mon, 24 Nov 2025 13:48:19 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.94.2
Hostname: old-k8s-version-513442
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
System Info:
Machine ID: 9629f1d5bc1ed524a56ce23c69214c09
System UUID: 7bc159f8-7fe0-4f8d-82dc-0cc733a1645b
Boot ID: 715d4626-373f-499b-b5de-b6d832ce4fe4
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace     Name                                              CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------     ----                                              ------------   ----------   ---------------   -------------   ---
default       busybox                                           0 (0%)         0 (0%)       0 (0%)            0 (0%)          11s
kube-system   coredns-5dd5756b68-b5rrl                          100m (1%)      0 (0%)       70Mi (0%)         170Mi (0%)      29s
kube-system   etcd-old-k8s-version-513442                       100m (1%)      0 (0%)       100Mi (0%)        0 (0%)          43s
kube-system   kindnet-tpjvb                                     100m (1%)      100m (1%)    50Mi (0%)         50Mi (0%)       29s
kube-system   kube-apiserver-old-k8s-version-513442             250m (3%)      0 (0%)       0 (0%)            0 (0%)          41s
kube-system   kube-controller-manager-old-k8s-version-513442    200m (2%)      0 (0%)       0 (0%)            0 (0%)          41s
kube-system   kube-proxy-hzfcx                                  0 (0%)         0 (0%)       0 (0%)            0 (0%)          29s
kube-system   kube-scheduler-old-k8s-version-513442             100m (1%)      0 (0%)       0 (0%)            0 (0%)          41s
kube-system   storage-provisioner                               0 (0%)         0 (0%)       0 (0%)            0 (0%)          28s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource            Requests     Limits
--------            --------     ------
cpu                 850m (10%)   100m (1%)
memory              220Mi (0%)   220Mi (0%)
ephemeral-storage   0 (0%)       0 (0%)
hugepages-1Gi       0 (0%)       0 (0%)
hugepages-2Mi       0 (0%)       0 (0%)
Events:
Type    Reason                    Age                 From              Message
----    ------                    ----                ----              -------
Normal  Starting                  28s                 kube-proxy
Normal  Starting                  48s                 kubelet           Starting kubelet.
Normal  NodeHasSufficientMemory   47s (x8 over 48s)   kubelet           Node old-k8s-version-513442 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure     47s (x8 over 48s)   kubelet           Node old-k8s-version-513442 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID      47s (x7 over 48s)   kubelet           Node old-k8s-version-513442 status is now: NodeHasSufficientPID
Normal  NodeAllocatableEnforced   47s                 kubelet           Updated Node Allocatable limit across pods
Normal  Starting                  42s                 kubelet           Starting kubelet.
Normal  NodeAllocatableEnforced   42s                 kubelet           Updated Node Allocatable limit across pods
Normal  NodeHasSufficientMemory   41s                 kubelet           Node old-k8s-version-513442 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure     41s                 kubelet           Node old-k8s-version-513442 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID      41s                 kubelet           Node old-k8s-version-513442 status is now: NodeHasSufficientPID
Normal  RegisteredNode            30s                 node-controller   Node old-k8s-version-513442 event: Registered Node old-k8s-version-513442 in Controller
Normal  NodeReady                 14s                 kubelet           Node old-k8s-version-513442 status is now: NodeReady
==> dmesg <==
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 0a 91 30 bc 58 af 08 06
[Nov24 12:45] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 9a fb 84 7d 9e 9e 08 06
[ +0.000332] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 0a 91 30 bc 58 af 08 06
[ +25.292047] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff da 14 b4 9b 3e 8f 08 06
[ +0.024207] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 06 8e 71 0b 76 c3 08 06
[ +16.768103] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff de 45 b6 ad fe 93 08 06
[ +5.950770] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 2e b5 4a 70 0a 35 08 06
[Nov24 12:46] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 4e 8b d0 4a da 7e 08 06
[ +0.000557] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 2e b5 4a 70 0a 35 08 06
[ +1.903671] IPv4: martian source 10.244.0.1 from 10.244.0.4, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff c2 1f e8 fc 59 74 08 06
[ +0.000341] IPv4: martian source 10.244.0.4 from 10.244.0.3, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff da 14 b4 9b 3e 8f 08 06
[ +17.535584] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff e2 31 ec 7c 1d 38 08 06
[ +0.000426] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff de 45 b6 ad fe 93 08 06
==> etcd [5793c7fd11b5c568735219e3d193c67360dde88032a438ae332a3e12d7fdf0a5] <==
{"level":"info","ts":"2025-11-24T13:47:46.896061Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","added-peer-id":"dfc97eb0aae75b33","added-peer-peer-urls":["https://192.168.94.2:2380"]}
{"level":"info","ts":"2025-11-24T13:47:47.18298Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-24T13:47:47.183032Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-24T13:47:47.183064Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgPreVoteResp from dfc97eb0aae75b33 at term 1"}
{"level":"info","ts":"2025-11-24T13:47:47.183082Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became candidate at term 2"}
{"level":"info","ts":"2025-11-24T13:47:47.18309Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgVoteResp from dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-24T13:47:47.183102Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became leader at term 2"}
{"level":"info","ts":"2025-11-24T13:47:47.183112Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: dfc97eb0aae75b33 elected leader dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-24T13:47:47.184166Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"dfc97eb0aae75b33","local-member-attributes":"{Name:old-k8s-version-513442 ClientURLs:[https://192.168.94.2:2379]}","request-path":"/0/members/dfc97eb0aae75b33/attributes","cluster-id":"da400bbece288f5a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-24T13:47:47.184441Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T13:47:47.184423Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T13:47:47.184639Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T13:47:47.184677Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-24T13:47:47.184697Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-24T13:47:47.185356Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T13:47:47.185462Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T13:47:47.185485Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T13:47:47.186127Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.94.2:2379"}
{"level":"info","ts":"2025-11-24T13:47:47.186272Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-24T13:48:02.673385Z","caller":"traceutil/trace.go:171","msg":"trace[456960560] linearizableReadLoop","detail":"{readStateIndex:331; appliedIndex:330; }","duration":"136.421105ms","start":"2025-11-24T13:48:02.536946Z","end":"2025-11-24T13:48:02.673367Z","steps":["trace[456960560] 'read index received' (duration: 136.248358ms)","trace[456960560] 'applied index is now lower than readState.Index' (duration: 171.987µs)"],"step_count":2}
{"level":"warn","ts":"2025-11-24T13:48:02.673673Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"136.721804ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces/kube-system\" ","response":"range_response_count:1 size:351"}
{"level":"info","ts":"2025-11-24T13:48:02.67373Z","caller":"traceutil/trace.go:171","msg":"trace[286257082] range","detail":"{range_begin:/registry/namespaces/kube-system; range_end:; response_count:1; response_revision:319; }","duration":"136.809717ms","start":"2025-11-24T13:48:02.536907Z","end":"2025-11-24T13:48:02.673717Z","steps":["trace[286257082] 'agreement among raft nodes before linearized reading' (duration: 136.690513ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T13:48:02.673851Z","caller":"traceutil/trace.go:171","msg":"trace[2009156990] transaction","detail":"{read_only:false; response_revision:319; number_of_response:1; }","duration":"168.350659ms","start":"2025-11-24T13:48:02.505481Z","end":"2025-11-24T13:48:02.673832Z","steps":["trace[2009156990] 'process raft request' (duration: 167.775897ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-24T13:48:02.673811Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"132.836489ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/default/default\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-24T13:48:02.673892Z","caller":"traceutil/trace.go:171","msg":"trace[1422014017] range","detail":"{range_begin:/registry/serviceaccounts/default/default; range_end:; response_count:0; response_revision:319; }","duration":"132.929171ms","start":"2025-11-24T13:48:02.54095Z","end":"2025-11-24T13:48:02.673879Z","steps":["trace[1422014017] 'agreement among raft nodes before linearized reading' (duration: 132.804065ms)"],"step_count":1}
==> kernel <==
13:48:33 up 2:30, 0 user, load average: 2.03, 2.80, 1.92
Linux old-k8s-version-513442 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [1dab1df16e654e8d2bf5248f41d4e61a9922afd9e9aa99eb10b51ff76d83fd27] <==
I1124 13:48:08.805828 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1124 13:48:08.806157 1 main.go:139] hostIP = 192.168.94.2
podIP = 192.168.94.2
I1124 13:48:08.806325 1 main.go:148] setting mtu 1500 for CNI
I1124 13:48:08.806347 1 main.go:178] kindnetd IP family: "ipv4"
I1124 13:48:08.806366 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-24T13:48:09Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1124 13:48:09.065201 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1124 13:48:09.065237 1 controller.go:381] "Waiting for informer caches to sync"
I1124 13:48:09.065250 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1124 13:48:09.205219 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1124 13:48:09.465641 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1124 13:48:09.465667 1 metrics.go:72] Registering metrics
I1124 13:48:09.465726 1 controller.go:711] "Syncing nftables rules"
I1124 13:48:19.068504 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1124 13:48:19.068576 1 main.go:301] handling current node
I1124 13:48:29.065440 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1124 13:48:29.065473 1 main.go:301] handling current node
==> kube-apiserver [b89c098ff2cb630c37cf57f5061688d52a419284b629da3305843a9dee1a5dbb] <==
I1124 13:47:48.951700 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1124 13:47:48.951970 1 apf_controller.go:377] Running API Priority and Fairness config worker
I1124 13:47:48.951984 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1124 13:47:48.952108 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1124 13:47:48.952141 1 aggregator.go:166] initial CRD sync complete...
I1124 13:47:48.952149 1 autoregister_controller.go:141] Starting autoregister controller
I1124 13:47:48.952156 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1124 13:47:48.952165 1 cache.go:39] Caches are synced for autoregister controller
I1124 13:47:48.953986 1 controller.go:624] quota admission added evaluator for: namespaces
I1124 13:47:49.152644 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1124 13:47:49.858204 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1124 13:47:49.862657 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1124 13:47:49.862682 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1124 13:47:50.422560 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1124 13:47:50.472548 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1124 13:47:50.570004 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1124 13:47:50.579741 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.94.2]
I1124 13:47:50.580884 1 controller.go:624] quota admission added evaluator for: endpoints
I1124 13:47:50.586999 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1124 13:47:50.885484 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1124 13:47:51.864040 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1124 13:47:51.877619 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1124 13:47:51.890804 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1124 13:48:04.597347 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1124 13:48:04.651565 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [f7663d3953f0ee1aca9b8f557f4e81791e15502a0a6447b494d2035c4c9b2dfc] <==
I1124 13:48:03.884906 1 shared_informer.go:318] Caches are synced for deployment
I1124 13:48:03.932363 1 shared_informer.go:318] Caches are synced for resource quota
I1124 13:48:03.941297 1 shared_informer.go:318] Caches are synced for resource quota
I1124 13:48:04.243318 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 13:48:04.243355 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1124 13:48:04.258877 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 13:48:04.607851 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-hzfcx"
I1124 13:48:04.611600 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-tpjvb"
I1124 13:48:04.656277 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1124 13:48:04.748220 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-bcd4m"
I1124 13:48:04.756616 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-b5rrl"
I1124 13:48:04.767398 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="111.018323ms"
I1124 13:48:04.782835 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="15.361034ms"
I1124 13:48:04.782967 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="91.68µs"
I1124 13:48:04.940856 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1124 13:48:04.951934 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-bcd4m"
I1124 13:48:04.962829 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="21.807545ms"
I1124 13:48:04.970616 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="7.726674ms"
I1124 13:48:04.970784 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="88.42µs"
I1124 13:48:19.202453 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="121.753µs"
I1124 13:48:19.220547 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="97.147µs"
I1124 13:48:20.044339 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="114.847µs"
I1124 13:48:20.080458 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="7.482374ms"
I1124 13:48:20.080575 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="70.63µs"
I1124 13:48:23.770117 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [0b87cfcc163e379c4e72aa8c64739d9d13a801c140b5fabe7cbbc11022cfd12a] <==
I1124 13:48:05.277959 1 server_others.go:69] "Using iptables proxy"
I1124 13:48:05.288147 1 node.go:141] Successfully retrieved node IP: 192.168.94.2
I1124 13:48:05.312455 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1124 13:48:05.315014 1 server_others.go:152] "Using iptables Proxier"
I1124 13:48:05.315055 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1124 13:48:05.315064 1 server_others.go:438] "Defaulting to no-op detect-local"
I1124 13:48:05.315106 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1124 13:48:05.315978 1 server.go:846] "Version info" version="v1.28.0"
I1124 13:48:05.316072 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1124 13:48:05.317668 1 config.go:188] "Starting service config controller"
I1124 13:48:05.317713 1 shared_informer.go:311] Waiting for caches to sync for service config
I1124 13:48:05.317754 1 config.go:315] "Starting node config controller"
I1124 13:48:05.317762 1 shared_informer.go:311] Waiting for caches to sync for node config
I1124 13:48:05.318091 1 config.go:97] "Starting endpoint slice config controller"
I1124 13:48:05.318114 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1124 13:48:05.418055 1 shared_informer.go:318] Caches are synced for service config
I1124 13:48:05.418104 1 shared_informer.go:318] Caches are synced for node config
I1124 13:48:05.419230 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [bdd5c20173350449ff23a9ee9a791fe034c518afc7784448209ad9b0a5c32a9f] <==
W1124 13:47:49.773882 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1124 13:47:49.773941 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1124 13:47:49.817194 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1124 13:47:49.817241 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1124 13:47:49.898465 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1124 13:47:49.898514 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1124 13:47:49.973231 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1124 13:47:49.973807 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1124 13:47:49.975515 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1124 13:47:49.975624 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1124 13:47:50.044243 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1124 13:47:50.044284 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1124 13:47:50.065787 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1124 13:47:50.065828 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1124 13:47:50.067051 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1124 13:47:50.067084 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1124 13:47:50.088454 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1124 13:47:50.088492 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1124 13:47:50.094062 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1124 13:47:50.094103 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1124 13:47:50.176377 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1124 13:47:50.176425 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1124 13:47:50.188050 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1124 13:47:50.188094 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
I1124 13:47:51.410574 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
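
The scheduler's "forbidden" warnings above appear to be the usual startup race rather than a misconfiguration: its informers start listing before the apiserver has finished reconciling the bootstrap RBAC for system:kube-scheduler, and the noise stops once the caches sync (the final line). One way to confirm the bindings really are in place is to ask the apiserver directly with a SubjectAccessReview; a minimal sketch (a hypothetical helper, not part of this test suite; assumes $KUBECONFIG points at the cluster):

// rbaccheck.go: sketch distinguishing a startup race from a real RBAC problem.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		log.Fatal(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Ask the apiserver: may system:kube-scheduler list pods cluster-wide?
	sar := &authv1.SubjectAccessReview{
		Spec: authv1.SubjectAccessReviewSpec{
			User: "system:kube-scheduler",
			ResourceAttributes: &authv1.ResourceAttributes{
				Verb:     "list",
				Resource: "pods",
			},
		},
	}
	res, err := cs.AuthorizationV1().SubjectAccessReviews().Create(
		context.Background(), sar, metav1.CreateOptions{})
	if err != nil {
		log.Fatal(err)
	}
	// allowed=true means the bindings exist and the warnings were pre-sync noise.
	fmt.Printf("allowed=%v reason=%q\n", res.Status.Allowed, res.Status.Reason)
}
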
==> kubelet <==
Nov 24 13:48:03 old-k8s-version-513442 kubelet[1521]: I1124 13:48:03.736815 1521 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.621236 1521 topology_manager.go:215] "Topology Admit Handler" podUID="f4ba208a-1a78-46ae-9684-ff3309400852" podNamespace="kube-system" podName="kube-proxy-hzfcx"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.628198 1521 topology_manager.go:215] "Topology Admit Handler" podUID="c7df115a-8394-4f80-ac6c-5b1fc95337b5" podNamespace="kube-system" podName="kindnet-tpjvb"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.701758 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/c7df115a-8394-4f80-ac6c-5b1fc95337b5-xtables-lock\") pod \"kindnet-tpjvb\" (UID: \"c7df115a-8394-4f80-ac6c-5b1fc95337b5\") " pod="kube-system/kindnet-tpjvb"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702003 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cdcx\" (UniqueName: \"kubernetes.io/projected/f4ba208a-1a78-46ae-9684-ff3309400852-kube-api-access-6cdcx\") pod \"kube-proxy-hzfcx\" (UID: \"f4ba208a-1a78-46ae-9684-ff3309400852\") " pod="kube-system/kube-proxy-hzfcx"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702157 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/c7df115a-8394-4f80-ac6c-5b1fc95337b5-cni-cfg\") pod \"kindnet-tpjvb\" (UID: \"c7df115a-8394-4f80-ac6c-5b1fc95337b5\") " pod="kube-system/kindnet-tpjvb"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702290 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c7df115a-8394-4f80-ac6c-5b1fc95337b5-lib-modules\") pod \"kindnet-tpjvb\" (UID: \"c7df115a-8394-4f80-ac6c-5b1fc95337b5\") " pod="kube-system/kindnet-tpjvb"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702379 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnddq\" (UniqueName: \"kubernetes.io/projected/c7df115a-8394-4f80-ac6c-5b1fc95337b5-kube-api-access-cnddq\") pod \"kindnet-tpjvb\" (UID: \"c7df115a-8394-4f80-ac6c-5b1fc95337b5\") " pod="kube-system/kindnet-tpjvb"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702452 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/f4ba208a-1a78-46ae-9684-ff3309400852-kube-proxy\") pod \"kube-proxy-hzfcx\" (UID: \"f4ba208a-1a78-46ae-9684-ff3309400852\") " pod="kube-system/kube-proxy-hzfcx"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702483 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/f4ba208a-1a78-46ae-9684-ff3309400852-xtables-lock\") pod \"kube-proxy-hzfcx\" (UID: \"f4ba208a-1a78-46ae-9684-ff3309400852\") " pod="kube-system/kube-proxy-hzfcx"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702513 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f4ba208a-1a78-46ae-9684-ff3309400852-lib-modules\") pod \"kube-proxy-hzfcx\" (UID: \"f4ba208a-1a78-46ae-9684-ff3309400852\") " pod="kube-system/kube-proxy-hzfcx"
Nov 24 13:48:06 old-k8s-version-513442 kubelet[1521]: I1124 13:48:06.009542 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-hzfcx" podStartSLOduration=2.00948849 podCreationTimestamp="2025-11-24 13:48:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:48:06.009255456 +0000 UTC m=+14.175181609" watchObservedRunningTime="2025-11-24 13:48:06.00948849 +0000 UTC m=+14.175414641"
Nov 24 13:48:09 old-k8s-version-513442 kubelet[1521]: I1124 13:48:09.017801 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-tpjvb" podStartSLOduration=2.028995374 podCreationTimestamp="2025-11-24 13:48:04 +0000 UTC" firstStartedPulling="2025-11-24 13:48:05.423030434 +0000 UTC m=+13.588956573" lastFinishedPulling="2025-11-24 13:48:08.411777827 +0000 UTC m=+16.577703968" observedRunningTime="2025-11-24 13:48:09.017454231 +0000 UTC m=+17.183380385" watchObservedRunningTime="2025-11-24 13:48:09.017742769 +0000 UTC m=+17.183668923"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.126026 1521 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.199313 1521 topology_manager.go:215] "Topology Admit Handler" podUID="65efb270-100a-4e7c-bee8-24de1df28586" podNamespace="kube-system" podName="storage-provisioner"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.202110 1521 topology_manager.go:215] "Topology Admit Handler" podUID="4e6c9b7c-5f0a-4c60-8197-20e985a07403" podNamespace="kube-system" podName="coredns-5dd5756b68-b5rrl"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.296963 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84ccn\" (UniqueName: \"kubernetes.io/projected/65efb270-100a-4e7c-bee8-24de1df28586-kube-api-access-84ccn\") pod \"storage-provisioner\" (UID: \"65efb270-100a-4e7c-bee8-24de1df28586\") " pod="kube-system/storage-provisioner"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.297219 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/65efb270-100a-4e7c-bee8-24de1df28586-tmp\") pod \"storage-provisioner\" (UID: \"65efb270-100a-4e7c-bee8-24de1df28586\") " pod="kube-system/storage-provisioner"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.297296 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj4xm\" (UniqueName: \"kubernetes.io/projected/4e6c9b7c-5f0a-4c60-8197-20e985a07403-kube-api-access-sj4xm\") pod \"coredns-5dd5756b68-b5rrl\" (UID: \"4e6c9b7c-5f0a-4c60-8197-20e985a07403\") " pod="kube-system/coredns-5dd5756b68-b5rrl"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.297327 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e6c9b7c-5f0a-4c60-8197-20e985a07403-config-volume\") pod \"coredns-5dd5756b68-b5rrl\" (UID: \"4e6c9b7c-5f0a-4c60-8197-20e985a07403\") " pod="kube-system/coredns-5dd5756b68-b5rrl"
Nov 24 13:48:20 old-k8s-version-513442 kubelet[1521]: I1124 13:48:20.055454 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-b5rrl" podStartSLOduration=16.055384325 podCreationTimestamp="2025-11-24 13:48:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:48:20.043996165 +0000 UTC m=+28.209922315" watchObservedRunningTime="2025-11-24 13:48:20.055384325 +0000 UTC m=+28.221310494"
Nov 24 13:48:20 old-k8s-version-513442 kubelet[1521]: I1124 13:48:20.072835 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=15.072769008 podCreationTimestamp="2025-11-24 13:48:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:48:20.05633827 +0000 UTC m=+28.222264421" watchObservedRunningTime="2025-11-24 13:48:20.072769008 +0000 UTC m=+28.238695171"
Nov 24 13:48:22 old-k8s-version-513442 kubelet[1521]: I1124 13:48:22.349894 1521 topology_manager.go:215] "Topology Admit Handler" podUID="e21ee73b-578f-48c9-826d-ab3b4bbb7871" podNamespace="default" podName="busybox"
Nov 24 13:48:22 old-k8s-version-513442 kubelet[1521]: I1124 13:48:22.417169 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmgg8\" (UniqueName: \"kubernetes.io/projected/e21ee73b-578f-48c9-826d-ab3b4bbb7871-kube-api-access-mmgg8\") pod \"busybox\" (UID: \"e21ee73b-578f-48c9-826d-ab3b4bbb7871\") " pod="default/busybox"
Nov 24 13:48:26 old-k8s-version-513442 kubelet[1521]: I1124 13:48:26.061183 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.287793929 podCreationTimestamp="2025-11-24 13:48:22 +0000 UTC" firstStartedPulling="2025-11-24 13:48:22.783005961 +0000 UTC m=+30.948932098" lastFinishedPulling="2025-11-24 13:48:25.556333595 +0000 UTC m=+33.722259740" observedRunningTime="2025-11-24 13:48:26.061015161 +0000 UTC m=+34.226941311" watchObservedRunningTime="2025-11-24 13:48:26.061121571 +0000 UTC m=+34.227047722"
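
The podStartSLOduration values in the kubelet lines decode as (watchObservedRunningTime − podCreationTimestamp) − (lastFinishedPulling − firstStartedPulling), i.e. startup latency with image-pull time excluded. For the busybox pod: (13:48:26.061 − 13:48:22) − (13:48:25.556 − 13:48:22.783) = 4.061s − 2.773s ≈ 1.288s, matching the logged 1.287793929s. A tiny sketch reproducing the arithmetic from the timestamps above:

// slo.go: reproduce the kubelet's podStartSLOduration arithmetic for busybox.
package main

import (
	"fmt"
	"time"
)

// layout matches the default time.Time formatting in the kubelet lines.
const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-11-24 13:48:22 +0000 UTC")
	observed := mustParse("2025-11-24 13:48:26.061121571 +0000 UTC") // watchObservedRunningTime
	pullStart := mustParse("2025-11-24 13:48:22.783005961 +0000 UTC")
	pullEnd := mustParse("2025-11-24 13:48:25.556333595 +0000 UTC")

	slo := observed.Sub(created) - pullEnd.Sub(pullStart)
	// Prints 1.287793937s — within nanoseconds of the logged 1.287793929s
	// (the kubelet subtracts monotonic-clock readings, hence the tiny drift).
	fmt.Println(slo)
}
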
==> storage-provisioner [c9c8f51adb6bbca8e0f954ad9082c0c66235dce129e152dd682ab69622b44aac] <==
I1124 13:48:19.713946 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1124 13:48:19.725060 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1124 13:48:19.725122 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1124 13:48:19.732798 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1124 13:48:19.733028 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-513442_df294b40-30a6-4b8c-83ff-3d897f2504d8!
I1124 13:48:19.733030 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"938f90ea-7103-4290-984c-f5e7c1aae849", APIVersion:"v1", ResourceVersion:"443", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-513442_df294b40-30a6-4b8c-83ff-3d897f2504d8 became leader
I1124 13:48:19.833675 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-513442_df294b40-30a6-4b8c-83ff-3d897f2504d8!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-513442 -n old-k8s-version-513442
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-513442 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
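
The storage-provisioner section above ends with a classic client-go leader-election handshake: acquire the kube-system/k8s.io-minikube-hostpath lock, emit a LeaderElection event, then start the controller. The provisioner in this log still uses the legacy Endpoints-based lock; a minimal sketch of the same flow with the current Lease lock (a hypothetical standalone program, assumes $KUBECONFIG):

// leader.go: sketch of the leader-election flow visible above, Lease-based.
package main

import (
	"context"
	"log"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		log.Fatal(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	id, _ := os.Hostname() // the provisioner uses <profile>_<uuid> as identity

	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "k8s.io-minikube-hostpath", Namespace: "kube-system"},
		Client:     cs.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}
	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				log.Println("acquired lease; starting provisioner controller")
			},
			OnStoppedLeading: func() { log.Println("lost lease; stopping") },
		},
	})
}
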
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-513442 -n old-k8s-version-513442
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-513442 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-513442 logs -n 25: (1.184466791s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-355661 sudo cat /lib/systemd/system/containerd.service │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ ssh │ -p cilium-355661 sudo cat /etc/containerd/config.toml │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ ssh │ -p cilium-355661 sudo containerd config dump │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ ssh │ -p cilium-355661 sudo systemctl status crio --all --full --no-pager │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ ssh │ -p cilium-355661 sudo systemctl cat crio --no-pager │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ start │ -p NoKubernetes-787855 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ 24 Nov 25 13:46 UTC │
│ ssh │ -p cilium-355661 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ ssh │ -p cilium-355661 sudo crio config │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ │
│ delete │ -p cilium-355661 │ cilium-355661 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ 24 Nov 25 13:46 UTC │
│ start │ -p force-systemd-flag-775412 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-flag-775412 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ 24 Nov 25 13:47 UTC │
│ delete │ -p NoKubernetes-787855 │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ 24 Nov 25 13:46 UTC │
│ start │ -p NoKubernetes-787855 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:46 UTC │ 24 Nov 25 13:47 UTC │
│ ssh │ force-systemd-flag-775412 ssh cat /etc/containerd/config.toml │ force-systemd-flag-775412 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ delete │ -p force-systemd-flag-775412 │ force-systemd-flag-775412 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ ssh │ -p NoKubernetes-787855 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ │
│ start │ -p cert-options-342221 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-342221 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ stop │ -p NoKubernetes-787855 │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ start │ -p NoKubernetes-787855 --driver=docker --container-runtime=containerd │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ ssh │ cert-options-342221 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-342221 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ ssh │ -p cert-options-342221 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-342221 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ delete │ -p cert-options-342221 │ cert-options-342221 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ start │ -p old-k8s-version-513442 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-513442 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:48 UTC │
│ ssh │ -p NoKubernetes-787855 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ │
│ delete │ -p NoKubernetes-787855 │ NoKubernetes-787855 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:47 UTC │
│ start │ -p no-preload-608395 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-608395 │ jenkins │ v1.37.0 │ 24 Nov 25 13:47 UTC │ 24 Nov 25 13:48 UTC │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/24 13:47:35
Running on machine: ubuntu-20-agent-6
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1124 13:47:35.072446 608917 out.go:360] Setting OutFile to fd 1 ...
I1124 13:47:35.072749 608917 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 13:47:35.072763 608917 out.go:374] Setting ErrFile to fd 2...
I1124 13:47:35.072768 608917 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1124 13:47:35.073046 608917 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21932-370498/.minikube/bin
I1124 13:47:35.073526 608917 out.go:368] Setting JSON to false
I1124 13:47:35.074857 608917 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-6","uptime":8994,"bootTime":1763983061,"procs":340,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1124 13:47:35.074959 608917 start.go:143] virtualization: kvm guest
I1124 13:47:35.077490 608917 out.go:179] * [no-preload-608395] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1124 13:47:35.079255 608917 out.go:179] - MINIKUBE_LOCATION=21932
I1124 13:47:35.079255 608917 notify.go:221] Checking for updates...
I1124 13:47:35.080776 608917 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1124 13:47:35.082396 608917 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21932-370498/kubeconfig
I1124 13:47:35.083932 608917 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21932-370498/.minikube
I1124 13:47:35.085251 608917 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1124 13:47:35.086603 608917 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1124 13:47:35.089427 608917 config.go:182] Loaded profile config "cert-expiration-099863": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 13:47:35.089575 608917 config.go:182] Loaded profile config "kubernetes-upgrade-358357": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 13:47:35.089706 608917 config.go:182] Loaded profile config "old-k8s-version-513442": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 13:47:35.089837 608917 driver.go:422] Setting default libvirt URI to qemu:///system
I1124 13:47:35.114581 608917 docker.go:124] docker version: linux-29.0.3:Docker Engine - Community
I1124 13:47:35.114769 608917 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 13:47:35.180508 608917 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:58 OomKillDisable:false NGoroutines:78 SystemTime:2025-11-24 13:47:35.169616068 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1124 13:47:35.180627 608917 docker.go:319] overlay module found
I1124 13:47:35.182258 608917 out.go:179] * Using the docker driver based on user configuration
I1124 13:47:35.183642 608917 start.go:309] selected driver: docker
I1124 13:47:35.183663 608917 start.go:927] validating driver "docker" against <nil>
I1124 13:47:35.183675 608917 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1124 13:47:35.184437 608917 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1124 13:47:35.249663 608917 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:58 OomKillDisable:false NGoroutines:78 SystemTime:2025-11-24 13:47:35.237755455 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652076544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-6 Labels:[] ExperimentalBuild:false ServerVersion:29.0.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1124 13:47:35.249975 608917 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1124 13:47:35.250402 608917 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 13:47:35.252318 608917 out.go:179] * Using Docker driver with root privileges
I1124 13:47:35.254354 608917 cni.go:84] Creating CNI manager for ""
I1124 13:47:35.254446 608917 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:47:35.254457 608917 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1124 13:47:35.254652 608917 start.go:353] cluster config:
{Name:no-preload-608395 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-608395 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 13:47:35.256201 608917 out.go:179] * Starting "no-preload-608395" primary control-plane node in "no-preload-608395" cluster
I1124 13:47:35.257392 608917 cache.go:134] Beginning downloading kic base image for docker with containerd
I1124 13:47:35.258857 608917 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1124 13:47:35.260330 608917 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1124 13:47:35.260404 608917 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1124 13:47:35.260496 608917 profile.go:143] Saving config to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/config.json ...
I1124 13:47:35.260537 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/config.json: {Name:mk2f4d5eff7070dcec35f39f30e01cd0b3fcce8c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
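
As the two lines above show, the generated cluster config is persisted as JSON under the profile directory (here .minikube/profiles/no-preload-608395/config.json), guarded by a lock file. A hedged sketch of decoding a small subset of the fields visible in the "cluster config:" dump (the structs below are trimmed to a few fields and are not minikube's full schema; the path assumes the default MINIKUBE_HOME of $HOME/.minikube, whereas this run overrode it):

// readprofile.go: sketch decoding a subset of the saved profile config.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// A few of the fields visible in the dump above; the real struct has many more.
type KubernetesConfig struct {
	KubernetesVersion string
	ClusterName       string
	ContainerRuntime  string
	NetworkPlugin     string
	ServiceCIDR       string
}

type ClusterConfig struct {
	Name             string
	Driver           string
	Memory           int
	CPUs             int
	KubernetesConfig KubernetesConfig
}

func main() {
	b, err := os.ReadFile(os.ExpandEnv("$HOME/.minikube/profiles/no-preload-608395/config.json"))
	if err != nil {
		log.Fatal(err)
	}
	var cc ClusterConfig
	if err := json.Unmarshal(b, &cc); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: driver=%s k8s=%s runtime=%s\n",
		cc.Name, cc.Driver, cc.KubernetesConfig.KubernetesVersion, cc.KubernetesConfig.ContainerRuntime)
}
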
I1124 13:47:35.260546 608917 cache.go:107] acquiring lock: {Name:mk28ec677a69a6f418643b8b89331fa25b8c42f3 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260546 608917 cache.go:107] acquiring lock: {Name:mkad3cbb6fa2e7f41e4d7c0e1e3c74156dc55521 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260557 608917 cache.go:107] acquiring lock: {Name:mk7aef7fc4ff6e4e4541fdeb1d5e26c13a66856b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260584 608917 cache.go:107] acquiring lock: {Name:mk586ecbe7f4b4aab48f8ad28d0d7b1848898c9c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260604 608917 cache.go:107] acquiring lock: {Name:mkf548ea8c9721a4e4ad1e37073c3deea8530810 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260622 608917 cache.go:107] acquiring lock: {Name:mk1ce266bd6b9003a6a371facbc84809dce0c3c8 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260651 608917 cache.go:107] acquiring lock: {Name:mk687b2dcc146d43e9d607f472f2f08a2307baed Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260663 608917 cache.go:107] acquiring lock: {Name:mk4b559f0fdae6e96edea26981618bf8d9d50b2d Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.260712 608917 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:35.260755 608917 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:35.260801 608917 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:35.260819 608917 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1124 13:47:35.260852 608917 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:35.260858 608917 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1124 13:47:35.260727 608917 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:35.261039 608917 cache.go:115] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1124 13:47:35.261050 608917 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 523.955µs
I1124 13:47:35.261069 608917 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1124 13:47:35.262249 608917 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1124 13:47:35.262277 608917 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:35.262359 608917 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:35.262407 608917 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1124 13:47:35.262461 608917 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:35.262522 608917 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:35.262735 608917 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:35.285963 608917 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1124 13:47:35.285989 608917 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1124 13:47:35.286014 608917 cache.go:240] Successfully downloaded all kic artifacts
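
The "daemon lookup ... No such image" lines above are expected on a fresh agent: with --preload=false, each registry.k8s.io image is first looked up in the local Docker daemon and only fetched into the cache when absent (the kicbase image, by contrast, was found in the daemon and the pull skipped). A minimal sketch of that check with the Docker Go SDK (hypothetical, not minikube's actual code):

// imagecheck.go: sketch of the daemon-lookup-then-pull decision logged above.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ref := "registry.k8s.io/pause:3.10.1"
	if _, _, err := cli.ImageInspectWithRaw(context.Background(), ref); err != nil {
		if client.IsErrNotFound(err) {
			// Equivalent to the "No such image" lookups above: fall back
			// to pulling from the registry (or loading from the cache).
			fmt.Println(ref, "not in the local daemon; a pull or cache load is needed")
			return
		}
		log.Fatal(err)
	}
	fmt.Println(ref, "already in the local daemon; pull can be skipped")
}
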
I1124 13:47:35.286057 608917 start.go:360] acquireMachinesLock for no-preload-608395: {Name:mkc9d1cf0cec9be2b369f1e47c690fc0399e88e2 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1124 13:47:35.286191 608917 start.go:364] duration metric: took 102.178µs to acquireMachinesLock for "no-preload-608395"
I1124 13:47:35.286224 608917 start.go:93] Provisioning new machine with config: &{Name:no-preload-608395 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-608395 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 13:47:35.286330 608917 start.go:125] createHost starting for "" (driver="docker")
I1124 13:47:30.558317 607669 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 13:47:30.558626 607669 start.go:159] libmachine.API.Create for "old-k8s-version-513442" (driver="docker")
I1124 13:47:30.558656 607669 client.go:173] LocalClient.Create starting
I1124 13:47:30.558725 607669 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem
I1124 13:47:30.558754 607669 main.go:143] libmachine: Decoding PEM data...
I1124 13:47:30.558772 607669 main.go:143] libmachine: Parsing certificate...
I1124 13:47:30.558826 607669 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem
I1124 13:47:30.558849 607669 main.go:143] libmachine: Decoding PEM data...
I1124 13:47:30.558860 607669 main.go:143] libmachine: Parsing certificate...
I1124 13:47:30.559212 607669 cli_runner.go:164] Run: docker network inspect old-k8s-version-513442 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 13:47:30.577139 607669 cli_runner.go:211] docker network inspect old-k8s-version-513442 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 13:47:30.577245 607669 network_create.go:284] running [docker network inspect old-k8s-version-513442] to gather additional debugging logs...
I1124 13:47:30.577276 607669 cli_runner.go:164] Run: docker network inspect old-k8s-version-513442
W1124 13:47:30.593786 607669 cli_runner.go:211] docker network inspect old-k8s-version-513442 returned with exit code 1
I1124 13:47:30.593826 607669 network_create.go:287] error running [docker network inspect old-k8s-version-513442]: docker network inspect old-k8s-version-513442: exit status 1
stdout:
[]
stderr:
Error response from daemon: network old-k8s-version-513442 not found
I1124 13:47:30.593854 607669 network_create.go:289] output of [docker network inspect old-k8s-version-513442]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network old-k8s-version-513442 not found
** /stderr **
I1124 13:47:30.594026 607669 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:47:30.613315 607669 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-8afb578efdfa IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:7a:5e:46:43:aa:fe} reservation:<nil>}
I1124 13:47:30.614364 607669 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-ca3a55f53176 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ce:98:62:4c:91:8f} reservation:<nil>}
I1124 13:47:30.614827 607669 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-e11236ccf9ba IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:36:3b:80:be:95:34} reservation:<nil>}
I1124 13:47:30.615410 607669 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-35b7bf6fd97a IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:5a:12:4e:d4:19:26} reservation:<nil>}
I1124 13:47:30.616018 607669 network.go:211] skipping subnet 192.168.85.0/24 that is taken: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName:br-1f5932eecbe7 IfaceIPv4:192.168.85.1 IfaceMTU:1500 IfaceMAC:aa:ff:d3:cd:de:0f} reservation:<nil>}
I1124 13:47:30.617269 607669 network.go:206] using free private subnet 192.168.94.0/24: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e7fa00}
I1124 13:47:30.617308 607669 network_create.go:124] attempt to create docker network old-k8s-version-513442 192.168.94.0/24 with gateway 192.168.94.1 and MTU of 1500 ...
I1124 13:47:30.617398 607669 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.94.0/24 --gateway=192.168.94.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=old-k8s-version-513442 old-k8s-version-513442
I1124 13:47:30.671102 607669 network_create.go:108] docker network old-k8s-version-513442 192.168.94.0/24 created
I1124 13:47:30.671150 607669 kic.go:121] calculated static IP "192.168.94.2" for the "old-k8s-version-513442" container
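
The subnet scan above is straightforward: candidate private /24s advance from 192.168.49.0/24 in steps of 9, each is skipped if an existing bridge already owns it, and the first free one (here 192.168.94.0/24) is used, with .1 as the gateway and .2 as the node's static IP. A toy sketch of the selection loop (the hard-coded taken set stands in for the real inspection of host interfaces and Docker networks):

// subnetpick.go: toy restatement of the free-subnet scan logged above.
package main

import "fmt"

func main() {
	// Third octets already claimed by other profiles, per the log above.
	taken := map[int]bool{49: true, 58: true, 67: true, 76: true, 85: true}

	for octet := 49; octet <= 255; octet += 9 {
		if taken[octet] {
			fmt.Printf("skipping subnet 192.168.%d.0/24 that is taken\n", octet)
			continue
		}
		fmt.Printf("using free private subnet 192.168.%d.0/24 (gateway 192.168.%d.1, node IP 192.168.%d.2)\n",
			octet, octet, octet)
		break
	}
}
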
I1124 13:47:30.671218 607669 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 13:47:30.689078 607669 cli_runner.go:164] Run: docker volume create old-k8s-version-513442 --label name.minikube.sigs.k8s.io=old-k8s-version-513442 --label created_by.minikube.sigs.k8s.io=true
I1124 13:47:30.709312 607669 oci.go:103] Successfully created a docker volume old-k8s-version-513442
I1124 13:47:30.709408 607669 cli_runner.go:164] Run: docker run --rm --name old-k8s-version-513442-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-513442 --entrypoint /usr/bin/test -v old-k8s-version-513442:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1124 13:47:31.132905 607669 oci.go:107] Successfully prepared a docker volume old-k8s-version-513442
I1124 13:47:31.132980 607669 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 13:47:31.132992 607669 kic.go:194] Starting extracting preloaded images to volume ...
I1124 13:47:31.133075 607669 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21932-370498/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-513442:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir
I1124 13:47:35.011677 607669 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21932-370498/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v old-k8s-version-513442:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -I lz4 -xf /preloaded.tar -C /extractDir: (3.878547269s)
I1124 13:47:35.011716 607669 kic.go:203] duration metric: took 3.878721361s to extract preloaded images to volume ...
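The extraction step shells out to docker so the lz4 preload tarball lands directly in the named volume that the node container later mounts at /var. A sketch of the same invocation from Go, with the tarball path and image name as placeholders for the values in the log:

```go
package main

import (
	"log"
	"os/exec"
)

func main() {
	// Placeholder names mirroring the logged command; adjust for your host.
	tarball := "/path/to/preloaded-images-k8s-v18-v1.28.0-containerd-overlay2-amd64.tar.lz4"
	volume := "old-k8s-version-513442"
	image := "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948"

	// Run tar inside a throwaway container so the tarball is unpacked
	// straight into the named volume, with no copy through the host.
	cmd := exec.Command("docker", "run", "--rm",
		"--entrypoint", "/usr/bin/tar",
		"-v", tarball+":/preloaded.tar:ro",
		"-v", volume+":/extractDir",
		image,
		"-I", "lz4", "-xf", "/preloaded.tar", "-C", "/extractDir")
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Fatalf("extract failed: %v\n%s", err, out)
	}
}
```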
W1124 13:47:35.011796 607669 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1124 13:47:35.011829 607669 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1124 13:47:35.011871 607669 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 13:47:35.073961 607669 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname old-k8s-version-513442 --name old-k8s-version-513442 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=old-k8s-version-513442 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=old-k8s-version-513442 --network old-k8s-version-513442 --ip 192.168.94.2 --volume old-k8s-version-513442:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1124 13:47:32.801968 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:47:32.802485 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
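The healthz probe above is one iteration of a retry loop: TLS verification has to be relaxed because the client may not trust the apiserver certificate yet, and "connection refused" simply means "try again". A minimal Go sketch, with the endpoint taken from the log and the 2s cadence assumed:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Skip cert verification: the probe runs before we can trust the cert chain.
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	for {
		resp, err := client.Get("https://192.168.76.2:8443/healthz")
		if err == nil && resp.StatusCode == http.StatusOK {
			resp.Body.Close()
			fmt.Println("apiserver healthy")
			return
		}
		if err == nil {
			resp.Body.Close()
		}
		// Connection refused or non-200: the apiserver is not up yet.
		time.Sleep(2 * time.Second)
	}
}
```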
I1124 13:47:32.802542 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:47:32.802595 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:47:32.832902 572647 cri.go:89] found id: "6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:32.832956 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:32.832963 572647 cri.go:89] found id: ""
I1124 13:47:32.832972 572647 logs.go:282] 2 containers: [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:47:32.833038 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:32.837621 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:32.841927 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:47:32.842013 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:47:32.877193 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:32.877214 572647 cri.go:89] found id: ""
I1124 13:47:32.877223 572647 logs.go:282] 1 container: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:47:32.877290 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:32.882239 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:47:32.882329 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:47:32.912677 572647 cri.go:89] found id: ""
I1124 13:47:32.912709 572647 logs.go:282] 0 containers: []
W1124 13:47:32.912727 572647 logs.go:284] No container was found matching "coredns"
I1124 13:47:32.912735 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:47:32.912799 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:47:32.942634 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:32.942656 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:32.942662 572647 cri.go:89] found id: ""
I1124 13:47:32.942672 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:47:32.942735 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:32.947427 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:32.951442 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:47:32.951519 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:47:32.982583 572647 cri.go:89] found id: ""
I1124 13:47:32.982614 572647 logs.go:282] 0 containers: []
W1124 13:47:32.982626 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:47:32.982635 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:47:32.982706 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:47:33.013412 572647 cri.go:89] found id: "daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:33.013432 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:33.013435 572647 cri.go:89] found id: ""
I1124 13:47:33.013444 572647 logs.go:282] 2 containers: [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:47:33.013492 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:33.017848 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:33.021955 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:47:33.022038 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:47:33.055691 572647 cri.go:89] found id: ""
I1124 13:47:33.055722 572647 logs.go:282] 0 containers: []
W1124 13:47:33.055733 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:47:33.055743 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:47:33.055822 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:47:33.086844 572647 cri.go:89] found id: ""
I1124 13:47:33.086868 572647 logs.go:282] 0 containers: []
W1124 13:47:33.086877 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:47:33.086887 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:47:33.086904 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:33.140737 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:47:33.140775 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:33.185221 572647 logs.go:123] Gathering logs for kube-controller-manager [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf] ...
I1124 13:47:33.185259 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:33.218642 572647 logs.go:123] Gathering logs for container status ...
I1124 13:47:33.218669 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:47:33.251506 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:47:33.251634 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:47:33.346627 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:47:33.346672 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:47:33.363530 572647 logs.go:123] Gathering logs for kube-apiserver [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8] ...
I1124 13:47:33.363571 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:33.400997 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:47:33.401042 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:33.446051 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:47:33.446088 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:33.484418 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:47:33.484465 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:47:33.537056 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:47:33.537093 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:47:33.611727 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:47:33.611762 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:47:33.611778 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
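Each "Gathering logs for ..." pair above is one iteration of the same pattern: resolve container IDs with `crictl ps`, then tail each container's logs. A compact Go sketch of that loop, using the kube-apiserver IDs found above:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// IDs as found above; in practice they come from
	// `sudo crictl ps -a --quiet --name=<component>`.
	ids := []string{
		"6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8",
		"707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce",
	}
	for _, id := range ids {
		// Tail the last 400 lines of each container's logs, as in the log above.
		out, err := exec.Command("sudo", "/usr/local/bin/crictl",
			"logs", "--tail", "400", id).CombinedOutput()
		if err != nil {
			fmt.Printf("logs for %s failed: %v\n", id, err)
			continue
		}
		fmt.Printf("---- %s ----\n%s", id, out)
	}
}
```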
I1124 13:47:36.150015 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:47:36.150435 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:47:36.150499 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:47:36.150559 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:47:36.181496 572647 cri.go:89] found id: "6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:36.181524 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:36.181530 572647 cri.go:89] found id: ""
I1124 13:47:36.181541 572647 logs.go:282] 2 containers: [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:47:36.181626 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.186587 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.190995 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:47:36.191076 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:47:35.288531 608917 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1124 13:47:35.288826 608917 start.go:159] libmachine.API.Create for "no-preload-608395" (driver="docker")
I1124 13:47:35.288879 608917 client.go:173] LocalClient.Create starting
I1124 13:47:35.288981 608917 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem
I1124 13:47:35.289027 608917 main.go:143] libmachine: Decoding PEM data...
I1124 13:47:35.289053 608917 main.go:143] libmachine: Parsing certificate...
I1124 13:47:35.289129 608917 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem
I1124 13:47:35.289159 608917 main.go:143] libmachine: Decoding PEM data...
I1124 13:47:35.289172 608917 main.go:143] libmachine: Parsing certificate...
I1124 13:47:35.289667 608917 cli_runner.go:164] Run: docker network inspect no-preload-608395 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1124 13:47:35.309178 608917 cli_runner.go:211] docker network inspect no-preload-608395 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1124 13:47:35.309257 608917 network_create.go:284] running [docker network inspect no-preload-608395] to gather additional debugging logs...
I1124 13:47:35.309283 608917 cli_runner.go:164] Run: docker network inspect no-preload-608395
W1124 13:47:35.328323 608917 cli_runner.go:211] docker network inspect no-preload-608395 returned with exit code 1
I1124 13:47:35.328350 608917 network_create.go:287] error running [docker network inspect no-preload-608395]: docker network inspect no-preload-608395: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-608395 not found
I1124 13:47:35.328362 608917 network_create.go:289] output of [docker network inspect no-preload-608395]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-608395 not found
** /stderr **
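The failed inspect here is the expected probe-before-create: exit status 1 with "network ... not found" on stderr means the network is free to create, while any other failure is a real error. A Go sketch of that distinction:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// networkExists probes a docker network the way the log does: inspect it and
// treat a "not found" failure as "safe to create". Sketch only.
func networkExists(name string) (bool, error) {
	out, err := exec.Command("docker", "network", "inspect", name).CombinedOutput()
	if err == nil {
		return true, nil
	}
	if strings.Contains(string(out), "not found") {
		return false, nil // missing network: caller may create it
	}
	return false, fmt.Errorf("inspect %s: %v\n%s", name, err, out)
}

func main() {
	ok, err := networkExists("no-preload-608395")
	fmt.Println(ok, err)
}
```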
I1124 13:47:35.328448 608917 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:47:35.351281 608917 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-8afb578efdfa IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:7a:5e:46:43:aa:fe} reservation:<nil>}
I1124 13:47:35.352105 608917 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-ca3a55f53176 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:ce:98:62:4c:91:8f} reservation:<nil>}
I1124 13:47:35.352583 608917 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-e11236ccf9ba IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:36:3b:80:be:95:34} reservation:<nil>}
I1124 13:47:35.353066 608917 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-35b7bf6fd97a IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:5a:12:4e:d4:19:26} reservation:<nil>}
I1124 13:47:35.353566 608917 network.go:211] skipping subnet 192.168.85.0/24 that is taken: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName:br-1f5932eecbe7 IfaceIPv4:192.168.85.1 IfaceMTU:1500 IfaceMAC:aa:ff:d3:cd:de:0f} reservation:<nil>}
I1124 13:47:35.354145 608917 network.go:211] skipping subnet 192.168.94.0/24 that is taken: &{IP:192.168.94.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.94.0/24 Gateway:192.168.94.1 ClientMin:192.168.94.2 ClientMax:192.168.94.254 Broadcast:192.168.94.255 IsPrivate:true Interface:{IfaceName:br-57f535f2d59b IfaceIPv4:192.168.94.1 IfaceMTU:1500 IfaceMAC:6e:28:a9:1e:8a:96} reservation:<nil>}
I1124 13:47:35.354775 608917 network.go:206] using free private subnet 192.168.103.0/24: &{IP:192.168.103.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.103.0/24 Gateway:192.168.103.1 ClientMin:192.168.103.2 ClientMax:192.168.103.254 Broadcast:192.168.103.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001d86bc0}
I1124 13:47:35.354805 608917 network_create.go:124] attempt to create docker network no-preload-608395 192.168.103.0/24 with gateway 192.168.103.1 and MTU of 1500 ...
I1124 13:47:35.354861 608917 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.103.0/24 --gateway=192.168.103.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-608395 no-preload-608395
I1124 13:47:35.432539 608917 network_create.go:108] docker network no-preload-608395 192.168.103.0/24 created
I1124 13:47:35.432598 608917 kic.go:121] calculated static IP "192.168.103.2" for the "no-preload-608395" container
I1124 13:47:35.432695 608917 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1124 13:47:35.453593 608917 cli_runner.go:164] Run: docker volume create no-preload-608395 --label name.minikube.sigs.k8s.io=no-preload-608395 --label created_by.minikube.sigs.k8s.io=true
I1124 13:47:35.471825 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1124 13:47:35.475329 608917 oci.go:103] Successfully created a docker volume no-preload-608395
I1124 13:47:35.475418 608917 cli_runner.go:164] Run: docker run --rm --name no-preload-608395-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-608395 --entrypoint /usr/bin/test -v no-preload-608395:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1124 13:47:35.484374 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1124 13:47:35.522730 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1124 13:47:35.528813 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1124 13:47:35.529239 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1124 13:47:35.541677 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1124 13:47:35.561542 608917 cache.go:162] opening: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1124 13:47:35.640840 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 exists
I1124 13:47:35.640868 608917 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1" took 380.250244ms
I1124 13:47:35.640883 608917 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 succeeded
I1124 13:47:35.985260 608917 oci.go:107] Successfully prepared a docker volume no-preload-608395
I1124 13:47:35.985319 608917 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
W1124 13:47:35.985414 608917 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1124 13:47:35.985453 608917 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1124 13:47:35.985506 608917 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1124 13:47:36.047047 608917 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-608395 --name no-preload-608395 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-608395 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-608395 --network no-preload-608395 --ip 192.168.103.2 --volume no-preload-608395:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1124 13:47:36.258467 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 exists
I1124 13:47:36.258503 608917 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.34.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1" took 997.955969ms
I1124 13:47:36.258519 608917 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.34.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 succeeded
I1124 13:47:36.410125 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Running}}
I1124 13:47:36.432289 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:47:36.453312 608917 cli_runner.go:164] Run: docker exec no-preload-608395 stat /var/lib/dpkg/alternatives/iptables
I1124 13:47:36.504193 608917 oci.go:144] the created container "no-preload-608395" has a running status.
I1124 13:47:36.504226 608917 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa...
I1124 13:47:36.604837 608917 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 13:47:36.631267 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:47:36.655799 608917 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 13:47:36.655830 608917 kic_runner.go:114] Args: [docker exec --privileged no-preload-608395 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 13:47:36.705661 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:47:36.729778 608917 machine.go:94] provisionDockerMachine start ...
I1124 13:47:36.729884 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:36.756901 608917 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:36.757367 608917 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33441 <nil> <nil>}
I1124 13:47:36.757380 608917 main.go:143] libmachine: About to run SSH command:
hostname
I1124 13:47:36.758446 608917 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1124 13:47:37.510037 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 exists
I1124 13:47:37.510068 608917 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.34.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1" took 2.249448579s
I1124 13:47:37.510081 608917 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.34.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 succeeded
I1124 13:47:37.572176 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 exists
I1124 13:47:37.572211 608917 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.34.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1" took 2.31168357s
I1124 13:47:37.572229 608917 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.34.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 succeeded
I1124 13:47:37.595833 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 exists
I1124 13:47:37.595868 608917 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.34.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1" took 2.335217312s
I1124 13:47:37.595886 608917 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.34.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 succeeded
I1124 13:47:37.719899 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 exists
I1124 13:47:37.719956 608917 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.12.1" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1" took 2.45935214s
I1124 13:47:37.719969 608917 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.12.1 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 succeeded
I1124 13:47:38.059972 608917 cache.go:157] /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 exists
I1124 13:47:38.060022 608917 cache.go:96] cache image "registry.k8s.io/etcd:3.6.4-0" -> "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0" took 2.799433794s
I1124 13:47:38.060036 608917 cache.go:80] save to tar file registry.k8s.io/etcd:3.6.4-0 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 succeeded
I1124 13:47:38.060055 608917 cache.go:87] Successfully saved all images to host disk.
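The cache lines above follow a stat-before-pull pattern: each image ref maps to a tar file under the cache directory (with ":" replaced by "_"), and an existing file counts as a hit. A hypothetical sketch of that check; the pull-and-save path is elided:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// Cache directory and ref->filename mapping mirror the paths in the log.
	cacheDir := "/home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64"
	refs := []string{"registry.k8s.io/pause:3.10.1", "registry.k8s.io/etcd:3.6.4-0"}
	for _, ref := range refs {
		// "registry.k8s.io/pause:3.10.1" -> ".../registry.k8s.io/pause_3.10.1"
		p := filepath.Join(cacheDir, strings.ReplaceAll(ref, ":", "_"))
		if _, err := os.Stat(p); err == nil {
			fmt.Println("cache hit:", p)
			continue
		}
		fmt.Println("cache miss, would pull and save:", ref)
	}
}
```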
I1124 13:47:39.915534 608917 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-608395
I1124 13:47:39.915567 608917 ubuntu.go:182] provisioning hostname "no-preload-608395"
I1124 13:47:39.915651 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:39.936421 608917 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:39.936658 608917 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33441 <nil> <nil>}
I1124 13:47:39.936672 608917 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-608395 && echo "no-preload-608395" | sudo tee /etc/hostname
I1124 13:47:35.415632 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Running}}
I1124 13:47:35.436407 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:47:35.457824 607669 cli_runner.go:164] Run: docker exec old-k8s-version-513442 stat /var/lib/dpkg/alternatives/iptables
I1124 13:47:35.505936 607669 oci.go:144] the created container "old-k8s-version-513442" has a running status.
I1124 13:47:35.505993 607669 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa...
I1124 13:47:35.536159 607669 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1124 13:47:35.565751 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:47:35.587350 607669 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1124 13:47:35.587376 607669 kic_runner.go:114] Args: [docker exec --privileged old-k8s-version-513442 chown docker:docker /home/docker/.ssh/authorized_keys]
I1124 13:47:35.639485 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:47:35.659275 607669 machine.go:94] provisionDockerMachine start ...
I1124 13:47:35.659377 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:35.682791 607669 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:35.683193 607669 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33435 <nil> <nil>}
I1124 13:47:35.683215 607669 main.go:143] libmachine: About to run SSH command:
hostname
I1124 13:47:35.683887 607669 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:57402->127.0.0.1:33435: read: connection reset by peer
I1124 13:47:38.829345 607669 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-513442
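The handshake failure at 13:47:35.68 followed by the clean "SSH cmd err, output: <nil>" here at 13:47:38.83 implies the provisioner redials until sshd inside the fresh container is ready. A TCP-level stand-in for that wait loop, with the forwarded port from the log (the real libmachine retry happens at the SSH layer):

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// waitForSSH redials until the forwarded sshd port accepts a connection
// or the deadline passes. Sketch only.
func waitForSSH(addr string, deadline time.Duration) error {
	stop := time.Now().Add(deadline)
	for {
		c, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err == nil {
			c.Close()
			return nil
		}
		if time.Now().After(stop) {
			return fmt.Errorf("ssh on %s not reachable: %w", addr, err)
		}
		time.Sleep(500 * time.Millisecond)
	}
}

func main() {
	fmt.Println(waitForSSH("127.0.0.1:33435", 30*time.Second))
}
```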
I1124 13:47:38.829376 607669 ubuntu.go:182] provisioning hostname "old-k8s-version-513442"
I1124 13:47:38.829451 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:38.847276 607669 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:38.847521 607669 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33435 <nil> <nil>}
I1124 13:47:38.847540 607669 main.go:143] libmachine: About to run SSH command:
sudo hostname old-k8s-version-513442 && echo "old-k8s-version-513442" | sudo tee /etc/hostname
I1124 13:47:39.005190 607669 main.go:143] libmachine: SSH cmd err, output: <nil>: old-k8s-version-513442
I1124 13:47:39.005277 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.023623 607669 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:39.023848 607669 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33435 <nil> <nil>}
I1124 13:47:39.023866 607669 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sold-k8s-version-513442' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 old-k8s-version-513442/g' /etc/hosts;
else
echo '127.0.1.1 old-k8s-version-513442' | sudo tee -a /etc/hosts;
fi
fi
I1124 13:47:39.170228 607669 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 13:47:39.170266 607669 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21932-370498/.minikube CaCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21932-370498/.minikube}
I1124 13:47:39.170286 607669 ubuntu.go:190] setting up certificates
I1124 13:47:39.170295 607669 provision.go:84] configureAuth start
I1124 13:47:39.170348 607669 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-513442
I1124 13:47:39.189446 607669 provision.go:143] copyHostCerts
I1124 13:47:39.189521 607669 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem, removing ...
I1124 13:47:39.189536 607669 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem
I1124 13:47:39.189619 607669 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem (1082 bytes)
I1124 13:47:39.189751 607669 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem, removing ...
I1124 13:47:39.189764 607669 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem
I1124 13:47:39.189810 607669 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem (1123 bytes)
I1124 13:47:39.189989 607669 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem, removing ...
I1124 13:47:39.190006 607669 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem
I1124 13:47:39.190054 607669 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem (1675 bytes)
I1124 13:47:39.190154 607669 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem org=jenkins.old-k8s-version-513442 san=[127.0.0.1 192.168.94.2 localhost minikube old-k8s-version-513442]
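The server cert is issued with SANs covering the loopback address, the container's static IP, and each hostname it may be reached by, so TLS works over the published 127.0.0.1 ports as well as on the bridge network. A hypothetical Go sketch of issuing such a cert (self-signed here for brevity; the real flow signs with the CA key pair read earlier):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"log"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.old-k8s-version-513442"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// SANs taken from the san=[...] list in the log line above.
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.94.2")},
		DNSNames:    []string{"localhost", "minikube", "old-k8s-version-513442"},
	}
	// Self-signed for the sketch: template doubles as the issuer.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	_ = pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}
```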
I1124 13:47:39.227079 607669 provision.go:177] copyRemoteCerts
I1124 13:47:39.227139 607669 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 13:47:39.227177 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.244951 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:47:39.349311 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1124 13:47:39.371319 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1124 13:47:39.391311 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes)
I1124 13:47:39.411071 607669 provision.go:87] duration metric: took 240.75737ms to configureAuth
I1124 13:47:39.411102 607669 ubuntu.go:206] setting minikube options for container-runtime
I1124 13:47:39.411303 607669 config.go:182] Loaded profile config "old-k8s-version-513442": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 13:47:39.411317 607669 machine.go:97] duration metric: took 3.752022568s to provisionDockerMachine
I1124 13:47:39.411325 607669 client.go:176] duration metric: took 8.852661553s to LocalClient.Create
I1124 13:47:39.411358 607669 start.go:167] duration metric: took 8.852720089s to libmachine.API.Create "old-k8s-version-513442"
I1124 13:47:39.411372 607669 start.go:293] postStartSetup for "old-k8s-version-513442" (driver="docker")
I1124 13:47:39.411388 607669 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 13:47:39.411452 607669 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 13:47:39.411508 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.429085 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:47:39.536320 607669 ssh_runner.go:195] Run: cat /etc/os-release
I1124 13:47:39.540367 607669 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 13:47:39.540402 607669 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 13:47:39.540414 607669 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-370498/.minikube/addons for local assets ...
I1124 13:47:39.540470 607669 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-370498/.minikube/files for local assets ...
I1124 13:47:39.540543 607669 filesync.go:149] local asset: /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem -> 3741222.pem in /etc/ssl/certs
I1124 13:47:39.540631 607669 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 13:47:39.549275 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem --> /etc/ssl/certs/3741222.pem (1708 bytes)
I1124 13:47:39.573695 607669 start.go:296] duration metric: took 162.301306ms for postStartSetup
I1124 13:47:39.574191 607669 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-513442
I1124 13:47:39.593438 607669 profile.go:143] Saving config to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/config.json ...
I1124 13:47:39.593801 607669 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 13:47:39.593897 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.615008 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:47:39.717288 607669 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 13:47:39.722340 607669 start.go:128] duration metric: took 9.166080327s to createHost
I1124 13:47:39.722370 607669 start.go:83] releasing machines lock for "old-k8s-version-513442", held for 9.166275546s
I1124 13:47:39.722447 607669 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" old-k8s-version-513442
I1124 13:47:39.743680 607669 ssh_runner.go:195] Run: cat /version.json
I1124 13:47:39.743731 607669 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 13:47:39.743745 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.743812 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:47:39.763336 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:47:39.763737 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:47:39.929805 607669 ssh_runner.go:195] Run: systemctl --version
I1124 13:47:39.938447 607669 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 13:47:39.944068 607669 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 13:47:39.944147 607669 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 13:47:39.974609 607669 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1124 13:47:39.974641 607669 start.go:496] detecting cgroup driver to use...
I1124 13:47:39.974679 607669 detect.go:190] detected "systemd" cgroup driver on host os
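A plausible stand-in for that host-side detection: treat the presence of systemd's runtime directory as the signal to prefer the "systemd" cgroup driver. This heuristic is an assumption for illustration, not minikube's detect.go:

```go
package main

import (
	"fmt"
	"os"
)

// detectCgroupDriver is a hypothetical sketch: systemd hosts expose
// /run/systemd/system, so prefer the "systemd" driver there.
func detectCgroupDriver() string {
	if _, err := os.Stat("/run/systemd/system"); err == nil {
		return "systemd"
	}
	return "cgroupfs"
}

func main() {
	fmt.Println("detected", detectCgroupDriver(), "cgroup driver")
}
```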
I1124 13:47:39.974728 607669 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 13:47:39.990824 607669 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 13:47:40.004856 607669 docker.go:218] disabling cri-docker service (if available) ...
I1124 13:47:40.004920 607669 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 13:47:40.024248 607669 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 13:47:40.044433 607669 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 13:47:40.145638 607669 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 13:47:40.247759 607669 docker.go:234] disabling docker service ...
I1124 13:47:40.247829 607669 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 13:47:40.269922 607669 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 13:47:40.284840 607669 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 13:47:40.379978 607669 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 13:47:40.471616 607669 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 13:47:40.485207 607669 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 13:47:40.501980 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I1124 13:47:40.513545 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 13:47:40.524134 607669 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1124 13:47:40.524215 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1124 13:47:40.533927 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:47:40.543474 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 13:47:40.553177 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:47:40.563129 607669 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 13:47:40.572813 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 13:47:40.583799 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 13:47:40.593872 607669 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
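The sed one-liners above patch containerd's TOML in place; the SystemdCgroup flip is the one that matters for the cgroup driver chosen earlier. The same idempotent rewrite expressed in Go on a stand-in config fragment:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Stand-in fragment; the real flow edits /etc/containerd/config.toml in place.
	in := []byte(`[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = false
`)
	// Same effect as the sed command above: force SystemdCgroup = true while
	// preserving indentation, so containerd and the kubelet agree on "systemd".
	re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
	out := re.ReplaceAll(in, []byte("${1}SystemdCgroup = true"))
	fmt.Print(string(out))
}
```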
I1124 13:47:40.604166 607669 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 13:47:40.612262 607669 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 13:47:40.620472 607669 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:47:40.706065 607669 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1124 13:47:40.809269 607669 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 13:47:40.809335 607669 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 13:47:40.814110 607669 start.go:564] Will wait 60s for crictl version
I1124 13:47:40.814187 607669 ssh_runner.go:195] Run: which crictl
I1124 13:47:40.818745 607669 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 13:47:40.843808 607669 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 13:47:40.843877 607669 ssh_runner.go:195] Run: containerd --version
I1124 13:47:40.865477 607669 ssh_runner.go:195] Run: containerd --version
I1124 13:47:40.893673 607669 out.go:179] * Preparing Kubernetes v1.28.0 on containerd 2.1.5 ...
I1124 13:47:36.234464 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:36.234492 572647 cri.go:89] found id: ""
I1124 13:47:36.234504 572647 logs.go:282] 1 container: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:47:36.234584 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.240249 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:47:36.240335 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:47:36.279967 572647 cri.go:89] found id: ""
I1124 13:47:36.279998 572647 logs.go:282] 0 containers: []
W1124 13:47:36.280009 572647 logs.go:284] No container was found matching "coredns"
I1124 13:47:36.280027 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:47:36.280082 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:47:36.313257 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:36.313286 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:36.313292 572647 cri.go:89] found id: ""
I1124 13:47:36.313302 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:47:36.313364 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.317818 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.322103 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:47:36.322170 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:47:36.352450 572647 cri.go:89] found id: ""
I1124 13:47:36.352485 572647 logs.go:282] 0 containers: []
W1124 13:47:36.352497 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:47:36.352506 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:47:36.352569 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:47:36.381849 572647 cri.go:89] found id: "daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:36.381876 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:36.381881 572647 cri.go:89] found id: ""
I1124 13:47:36.381896 572647 logs.go:282] 2 containers: [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:47:36.381995 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.386540 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:36.391244 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:47:36.391326 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:47:36.425813 572647 cri.go:89] found id: ""
I1124 13:47:36.425845 572647 logs.go:282] 0 containers: []
W1124 13:47:36.425856 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:47:36.425864 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:47:36.425945 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:47:36.461097 572647 cri.go:89] found id: ""
I1124 13:47:36.461127 572647 logs.go:282] 0 containers: []
W1124 13:47:36.461139 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:47:36.461153 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:47:36.461172 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:36.499983 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:47:36.500029 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:47:36.521192 572647 logs.go:123] Gathering logs for kube-apiserver [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8] ...
I1124 13:47:36.521223 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:36.557807 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:47:36.557859 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:47:36.611092 572647 logs.go:123] Gathering logs for container status ...
I1124 13:47:36.611122 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:47:36.647506 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:47:36.647538 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:47:36.773107 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:47:36.773142 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:47:36.847612 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:47:36.847637 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:47:36.847662 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:36.887116 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:47:36.887154 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:36.924700 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:47:36.924746 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:36.974655 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:47:36.974689 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:37.017086 572647 logs.go:123] Gathering logs for kube-controller-manager [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf] ...
I1124 13:47:37.017118 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:39.548013 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:47:39.548547 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:47:39.548616 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:47:39.548676 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:47:39.577831 572647 cri.go:89] found id: "6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:39.577852 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:39.577857 572647 cri.go:89] found id: ""
I1124 13:47:39.577867 572647 logs.go:282] 2 containers: [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:47:39.577947 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.582354 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.586625 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:47:39.586710 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:47:39.614522 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:39.614543 572647 cri.go:89] found id: ""
I1124 13:47:39.614552 572647 logs.go:282] 1 container: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:47:39.614607 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.619054 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:47:39.619127 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:47:39.646326 572647 cri.go:89] found id: ""
I1124 13:47:39.646352 572647 logs.go:282] 0 containers: []
W1124 13:47:39.646363 572647 logs.go:284] No container was found matching "coredns"
I1124 13:47:39.646370 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:47:39.646429 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:47:39.672725 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:39.672745 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:39.672749 572647 cri.go:89] found id: ""
I1124 13:47:39.672757 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:47:39.672814 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.677191 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.681175 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:47:39.681258 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:47:39.708431 572647 cri.go:89] found id: ""
I1124 13:47:39.708455 572647 logs.go:282] 0 containers: []
W1124 13:47:39.708464 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:47:39.708470 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:47:39.708519 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:47:39.740642 572647 cri.go:89] found id: "daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:39.740666 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:39.740672 572647 cri.go:89] found id: ""
I1124 13:47:39.740682 572647 logs.go:282] 2 containers: [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:47:39.740749 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.745558 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:39.749963 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:47:39.750090 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:47:39.785165 572647 cri.go:89] found id: ""
I1124 13:47:39.785200 572647 logs.go:282] 0 containers: []
W1124 13:47:39.785213 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:47:39.785223 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:47:39.785297 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:47:39.816314 572647 cri.go:89] found id: ""
I1124 13:47:39.816344 572647 logs.go:282] 0 containers: []
W1124 13:47:39.816356 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:47:39.816369 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:47:39.816386 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:39.855047 572647 logs.go:123] Gathering logs for kube-controller-manager [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf] ...
I1124 13:47:39.855082 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:39.884850 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:47:39.884886 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:39.923160 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:47:39.923209 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:47:40.011551 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:47:40.011587 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:47:40.028754 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:47:40.028784 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:47:40.073406 572647 logs.go:123] Gathering logs for container status ...
I1124 13:47:40.073463 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:47:40.118088 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:47:40.118130 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:47:40.186938 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:47:40.186963 572647 logs.go:123] Gathering logs for kube-apiserver [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8] ...
I1124 13:47:40.186979 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:40.225544 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:47:40.225575 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:40.264167 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:47:40.264212 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:40.310248 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:47:40.310285 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:40.101111 608917 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-608395
I1124 13:47:40.101196 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.122644 608917 main.go:143] libmachine: Using SSH client type: native
I1124 13:47:40.122921 608917 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33441 <nil> <nil>}
I1124 13:47:40.122949 608917 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-608395' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-608395/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-608395' | sudo tee -a /etc/hosts;
fi
fi
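The /etc/hosts rewrite above is idempotent: it rewrites an existing 127.0.1.1 entry or appends one. A minimal standalone sketch of the same pattern, with NODE_NAME as a hypothetical stand-in for the profile name:

#!/bin/bash
# Make NODE_NAME resolve locally; safe to re-run.
NODE_NAME=no-preload-608395            # example value from this run
if ! grep -q "\s${NODE_NAME}$" /etc/hosts; then
  if grep -q '^127.0.1.1\s' /etc/hosts; then
    # rewrite the existing 127.0.1.1 entry in place
    sudo sed -i "s/^127.0.1.1\s.*/127.0.1.1 ${NODE_NAME}/" /etc/hosts
  else
    # no 127.0.1.1 entry yet: append one
    echo "127.0.1.1 ${NODE_NAME}" | sudo tee -a /etc/hosts
  fi
fi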
I1124 13:47:40.280196 608917 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1124 13:47:40.280226 608917 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21932-370498/.minikube CaCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21932-370498/.minikube}
I1124 13:47:40.280268 608917 ubuntu.go:190] setting up certificates
I1124 13:47:40.280293 608917 provision.go:84] configureAuth start
I1124 13:47:40.280380 608917 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-608395
I1124 13:47:40.303469 608917 provision.go:143] copyHostCerts
I1124 13:47:40.303532 608917 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem, removing ...
I1124 13:47:40.303543 608917 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem
I1124 13:47:40.303590 608917 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/ca.pem (1082 bytes)
I1124 13:47:40.303726 608917 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem, removing ...
I1124 13:47:40.303739 608917 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem
I1124 13:47:40.303772 608917 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/cert.pem (1123 bytes)
I1124 13:47:40.303856 608917 exec_runner.go:144] found /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem, removing ...
I1124 13:47:40.303868 608917 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem
I1124 13:47:40.303892 608917 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21932-370498/.minikube/key.pem (1675 bytes)
I1124 13:47:40.303983 608917 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem org=jenkins.no-preload-608395 san=[127.0.0.1 192.168.103.2 localhost minikube no-preload-608395]
I1124 13:47:40.375070 608917 provision.go:177] copyRemoteCerts
I1124 13:47:40.375131 608917 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1124 13:47:40.375180 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.394610 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:47:40.501959 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1124 13:47:40.523137 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1124 13:47:40.542279 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1124 13:47:40.562226 608917 provision.go:87] duration metric: took 281.905194ms to configureAuth
I1124 13:47:40.562265 608917 ubuntu.go:206] setting minikube options for container-runtime
I1124 13:47:40.562572 608917 config.go:182] Loaded profile config "no-preload-608395": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 13:47:40.562595 608917 machine.go:97] duration metric: took 3.832793094s to provisionDockerMachine
I1124 13:47:40.562604 608917 client.go:176] duration metric: took 5.273718281s to LocalClient.Create
I1124 13:47:40.562649 608917 start.go:167] duration metric: took 5.273809151s to libmachine.API.Create "no-preload-608395"
I1124 13:47:40.562659 608917 start.go:293] postStartSetup for "no-preload-608395" (driver="docker")
I1124 13:47:40.562671 608917 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1124 13:47:40.562721 608917 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1124 13:47:40.562769 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.582715 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:47:40.688873 608917 ssh_runner.go:195] Run: cat /etc/os-release
I1124 13:47:40.692683 608917 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1124 13:47:40.692717 608917 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1124 13:47:40.692818 608917 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-370498/.minikube/addons for local assets ...
I1124 13:47:40.692947 608917 filesync.go:126] Scanning /home/jenkins/minikube-integration/21932-370498/.minikube/files for local assets ...
I1124 13:47:40.693078 608917 filesync.go:149] local asset: /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem -> 3741222.pem in /etc/ssl/certs
I1124 13:47:40.693208 608917 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1124 13:47:40.702139 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem --> /etc/ssl/certs/3741222.pem (1708 bytes)
I1124 13:47:40.725883 608917 start.go:296] duration metric: took 163.205649ms for postStartSetup
I1124 13:47:40.726376 608917 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-608395
I1124 13:47:40.744526 608917 profile.go:143] Saving config to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/config.json ...
I1124 13:47:40.745022 608917 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1124 13:47:40.745098 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.763260 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:47:40.869180 608917 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1124 13:47:40.874423 608917 start.go:128] duration metric: took 5.58807074s to createHost
I1124 13:47:40.874458 608917 start.go:83] releasing machines lock for "no-preload-608395", held for 5.58825096s
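The two df probes just above feed minikube's free-space accounting; a minimal sketch of the same checks, assuming /var is the data mount as in this run:

df -h /var  | awk 'NR==2{print $5}'    # percent of /var in use, e.g. "12%"
df -BG /var | awk 'NR==2{print $4}'    # whole gigabytes still free, e.g. "85G"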
I1124 13:47:40.874540 608917 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-608395
I1124 13:47:40.896709 608917 ssh_runner.go:195] Run: cat /version.json
I1124 13:47:40.896763 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.896807 608917 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1124 13:47:40.896904 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:47:40.918859 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:47:40.920576 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:47:41.084454 608917 ssh_runner.go:195] Run: systemctl --version
I1124 13:47:41.091582 608917 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1124 13:47:41.097406 608917 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1124 13:47:41.097478 608917 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1124 13:47:41.125540 608917 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1124 13:47:41.125566 608917 start.go:496] detecting cgroup driver to use...
I1124 13:47:41.125601 608917 detect.go:190] detected "systemd" cgroup driver on host os
I1124 13:47:41.125650 608917 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1124 13:47:41.148294 608917 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1124 13:47:41.167664 608917 docker.go:218] disabling cri-docker service (if available) ...
I1124 13:47:41.167740 608917 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1124 13:47:41.189235 608917 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1124 13:47:41.213594 608917 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1124 13:47:41.336134 608917 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1124 13:47:41.426955 608917 docker.go:234] disabling docker service ...
I1124 13:47:41.427023 608917 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1124 13:47:41.448189 608917 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1124 13:47:41.462073 608917 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1124 13:47:41.548298 608917 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1124 13:47:41.635202 608917 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1124 13:47:41.649149 608917 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1124 13:47:41.664451 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1124 13:47:41.676460 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1124 13:47:41.686131 608917 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1124 13:47:41.686199 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1124 13:47:41.695720 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:47:41.705503 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1124 13:47:41.714879 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1124 13:47:41.724369 608917 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1124 13:47:41.733131 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1124 13:47:41.742525 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1124 13:47:41.751826 608917 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1124 13:47:41.762473 608917 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1124 13:47:41.770755 608917 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1124 13:47:41.779154 608917 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:47:41.869150 608917 ssh_runner.go:195] Run: sudo systemctl restart containerd
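The run of sed edits above rewrites /etc/containerd/config.toml in place; the essential change is the cgroup-driver switch, sketched standalone below (assumes a stock config.toml that already contains a SystemdCgroup key):

# switch runc to the systemd cgroup driver, then restart containerd
sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml
sudo systemctl daemon-reload
sudo systemctl restart containerd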
I1124 13:47:41.957807 608917 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1124 13:47:41.957876 608917 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1124 13:47:41.965431 608917 start.go:564] Will wait 60s for crictl version
I1124 13:47:41.965500 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:41.970973 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1124 13:47:42.001317 608917 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1124 13:47:42.001405 608917 ssh_runner.go:195] Run: containerd --version
I1124 13:47:42.026320 608917 ssh_runner.go:195] Run: containerd --version
I1124 13:47:42.052318 608917 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
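The version probe that produced the lines above can be reproduced directly on the node; both commands are the ones the log runs:

sudo crictl version    # RuntimeName / RuntimeVersion / RuntimeApiVersion (containerd v2.1.5 here)
containerd --version   # client-side version string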
I1124 13:47:40.896022 607669 cli_runner.go:164] Run: docker network inspect old-k8s-version-513442 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:47:40.918522 607669 ssh_runner.go:195] Run: grep 192.168.94.1 host.minikube.internal$ /etc/hosts
I1124 13:47:40.923315 607669 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.94.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 13:47:40.935781 607669 kubeadm.go:884] updating cluster {Name:old-k8s-version-513442 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-513442 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 13:47:40.935932 607669 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime containerd
I1124 13:47:40.935998 607669 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 13:47:40.965650 607669 containerd.go:627] all images are preloaded for containerd runtime.
I1124 13:47:40.965689 607669 containerd.go:534] Images already preloaded, skipping extraction
I1124 13:47:40.965773 607669 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 13:47:40.999412 607669 containerd.go:627] all images are preloaded for containerd runtime.
I1124 13:47:40.999441 607669 cache_images.go:86] Images are preloaded, skipping loading
I1124 13:47:40.999451 607669 kubeadm.go:935] updating node { 192.168.94.2 8443 v1.28.0 containerd true true} ...
I1124 13:47:40.999568 607669 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-513442 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.94.2
[Install]
config:
{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-513442 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
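The kubelet drop-in dumped above is copied to the node as 10-kubeadm.conf (see the scp below); a minimal sketch of installing it by hand, with every flag value taken from this run:

sudo mkdir -p /etc/systemd/system/kubelet.service.d
sudo tee /etc/systemd/system/kubelet.service.d/10-kubeadm.conf >/dev/null <<'EOF'
[Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=old-k8s-version-513442 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.94.2
[Install]
EOF
sudo systemctl daemon-reload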
I1124 13:47:40.999640 607669 ssh_runner.go:195] Run: sudo crictl info
I1124 13:47:41.030216 607669 cni.go:84] Creating CNI manager for ""
I1124 13:47:41.030250 607669 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:47:41.030273 607669 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 13:47:41.030304 607669 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.94.2 APIServerPort:8443 KubernetesVersion:v1.28.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:old-k8s-version-513442 NodeName:old-k8s-version-513442 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.94.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.94.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 13:47:41.030479 607669 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.94.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "old-k8s-version-513442"
kubeletExtraArgs:
node-ip: 192.168.94.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.94.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
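The kubeadm config ends here and is written to /var/tmp/minikube/kubeadm.yaml below; it can be exercised without mutating the node first. A sketch, assuming kubeadm's --dry-run flag for init (supported in the v1.28 line):

sudo kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run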
I1124 13:47:41.030593 607669 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.0
I1124 13:47:41.040496 607669 binaries.go:51] Found k8s binaries, skipping transfer
I1124 13:47:41.040574 607669 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 13:47:41.048965 607669 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (326 bytes)
I1124 13:47:41.063246 607669 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 13:47:41.080199 607669 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2175 bytes)
I1124 13:47:41.095141 607669 ssh_runner.go:195] Run: grep 192.168.94.2 control-plane.minikube.internal$ /etc/hosts
I1124 13:47:41.099735 607669 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.94.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 13:47:41.111816 607669 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:47:41.205774 607669 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 13:47:41.229647 607669 certs.go:69] Setting up /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442 for IP: 192.168.94.2
I1124 13:47:41.229678 607669 certs.go:195] generating shared ca certs ...
I1124 13:47:41.229702 607669 certs.go:227] acquiring lock for ca certs: {Name:mk5874497fda855b1e2ff816147ffdfbc44946ae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.229867 607669 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21932-370498/.minikube/ca.key
I1124 13:47:41.229906 607669 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.key
I1124 13:47:41.229935 607669 certs.go:257] generating profile certs ...
I1124 13:47:41.230010 607669 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.key
I1124 13:47:41.230025 607669 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.crt with IP's: []
I1124 13:47:41.438692 607669 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.crt ...
I1124 13:47:41.438735 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.crt: {Name:mkbb44e092f1569b20ffeeea6d19871e0c7ea39c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.438903 607669 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.key ...
I1124 13:47:41.438942 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/client.key: {Name:mkcdbea7ce1dc4681fc91bbc4b78d2c028c94687 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.439100 607669 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key.eabc0cb4
I1124 13:47:41.439127 607669 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt.eabc0cb4 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.94.2]
I1124 13:47:41.518895 607669 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt.eabc0cb4 ...
I1124 13:47:41.518941 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt.eabc0cb4: {Name:mk47b90333d21f736ed33504f6da28b133242551 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.519134 607669 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key.eabc0cb4 ...
I1124 13:47:41.519153 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key.eabc0cb4: {Name:mk4592466df77ceb7a68fa27e5f9a0201b1a8063 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.519239 607669 certs.go:382] copying /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt.eabc0cb4 -> /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt
I1124 13:47:41.519312 607669 certs.go:386] copying /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key.eabc0cb4 -> /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key
I1124 13:47:41.519368 607669 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.key
I1124 13:47:41.519388 607669 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.crt with IP's: []
I1124 13:47:41.757186 607669 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.crt ...
I1124 13:47:41.757217 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.crt: {Name:mkb434108adbee544176aebf04c9ed8a63b76175 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.757418 607669 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.key ...
I1124 13:47:41.757442 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.key: {Name:mk640e3789cee888121bd6cc947590ae24e90dd5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:41.757683 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122.pem (1338 bytes)
W1124 13:47:41.757725 607669 certs.go:480] ignoring /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122_empty.pem, impossibly tiny 0 bytes
I1124 13:47:41.757736 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem (1679 bytes)
I1124 13:47:41.757777 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem (1082 bytes)
I1124 13:47:41.757814 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem (1123 bytes)
I1124 13:47:41.757849 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem (1675 bytes)
I1124 13:47:41.757940 607669 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem (1708 bytes)
I1124 13:47:41.758610 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 13:47:41.778634 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 13:47:41.799349 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 13:47:41.825279 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1124 13:47:41.844900 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1124 13:47:41.865036 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1124 13:47:41.887428 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 13:47:41.912645 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/old-k8s-version-513442/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1124 13:47:41.937284 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 13:47:41.966303 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122.pem --> /usr/share/ca-certificates/374122.pem (1338 bytes)
I1124 13:47:41.989056 607669 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem --> /usr/share/ca-certificates/3741222.pem (1708 bytes)
I1124 13:47:42.011989 607669 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 13:47:42.027976 607669 ssh_runner.go:195] Run: openssl version
I1124 13:47:42.036340 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/3741222.pem && ln -fs /usr/share/ca-certificates/3741222.pem /etc/ssl/certs/3741222.pem"
I1124 13:47:42.046698 607669 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/3741222.pem
I1124 13:47:42.051406 607669 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 13:20 /usr/share/ca-certificates/3741222.pem
I1124 13:47:42.051481 607669 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/3741222.pem
I1124 13:47:42.089903 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/3741222.pem /etc/ssl/certs/3ec20f2e.0"
I1124 13:47:42.100357 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 13:47:42.110986 607669 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:42.115955 607669 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 13:14 /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:42.116031 607669 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:42.153310 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1124 13:47:42.163209 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/374122.pem && ln -fs /usr/share/ca-certificates/374122.pem /etc/ssl/certs/374122.pem"
I1124 13:47:42.173625 607669 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/374122.pem
I1124 13:47:42.178229 607669 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 13:20 /usr/share/ca-certificates/374122.pem
I1124 13:47:42.178308 607669 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/374122.pem
I1124 13:47:42.216281 607669 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/374122.pem /etc/ssl/certs/51391683.0"
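The three link steps above follow OpenSSL's hashed-directory convention: each CA certificate is symlinked under the hash of its subject name so that libssl can find it by hash. A minimal sketch for a single certificate, using paths from this run:

CERT=/usr/share/ca-certificates/minikubeCA.pem
HASH=$(openssl x509 -hash -noout -in "$CERT")    # prints b5213941 for this CA, per the log
sudo ln -fs "$CERT" "/etc/ssl/certs/${HASH}.0"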
I1124 13:47:42.228415 607669 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 13:47:42.232854 607669 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 13:47:42.232959 607669 kubeadm.go:401] StartCluster: {Name:old-k8s-version-513442 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:true NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:old-k8s-version-513442 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 13:47:42.233058 607669 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 13:47:42.233119 607669 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 13:47:42.262130 607669 cri.go:89] found id: ""
I1124 13:47:42.262225 607669 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 13:47:42.271622 607669 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 13:47:42.280568 607669 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 13:47:42.280637 607669 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 13:47:42.289222 607669 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 13:47:42.289241 607669 kubeadm.go:158] found existing configuration files:
I1124 13:47:42.289287 607669 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 13:47:42.297481 607669 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 13:47:42.297560 607669 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 13:47:42.306305 607669 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 13:47:42.315150 607669 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 13:47:42.315224 607669 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 13:47:42.324595 607669 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 13:47:42.333840 607669 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 13:47:42.333922 607669 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 13:47:42.344021 607669 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 13:47:42.355171 607669 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 13:47:42.355226 607669 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1124 13:47:42.364345 607669 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.28.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 13:47:42.433190 607669 kubeadm.go:319] [init] Using Kubernetes version: v1.28.0
I1124 13:47:42.433270 607669 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 13:47:42.487608 607669 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 13:47:42.487695 607669 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1124 13:47:42.487758 607669 kubeadm.go:319] OS: Linux
I1124 13:47:42.487823 607669 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 13:47:42.487892 607669 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 13:47:42.487986 607669 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 13:47:42.488057 607669 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 13:47:42.488125 607669 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 13:47:42.488216 607669 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 13:47:42.488285 607669 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 13:47:42.488352 607669 kubeadm.go:319] CGROUPS_IO: enabled
I1124 13:47:42.585565 607669 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 13:47:42.585750 607669 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 13:47:42.585896 607669 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1124 13:47:42.762435 607669 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 13:47:42.054673 608917 cli_runner.go:164] Run: docker network inspect no-preload-608395 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1124 13:47:42.073094 608917 ssh_runner.go:195] Run: grep 192.168.103.1 host.minikube.internal$ /etc/hosts
I1124 13:47:42.078208 608917 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.103.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 13:47:42.089858 608917 kubeadm.go:884] updating cluster {Name:no-preload-608395 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-608395 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1124 13:47:42.090126 608917 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1124 13:47:42.090181 608917 ssh_runner.go:195] Run: sudo crictl images --output json
I1124 13:47:42.117576 608917 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.34.1". assuming images are not preloaded.
I1124 13:47:42.117601 608917 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.34.1 registry.k8s.io/kube-controller-manager:v1.34.1 registry.k8s.io/kube-scheduler:v1.34.1 registry.k8s.io/kube-proxy:v1.34.1 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.6.4-0 registry.k8s.io/coredns/coredns:v1.12.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1124 13:47:42.117671 608917 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:42.117683 608917 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.117696 608917 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1124 13:47:42.117708 608917 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.117683 608917 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.117737 608917 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.117738 608917 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.117773 608917 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.119957 608917 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.120028 608917 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.120041 608917 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.120103 608917 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.120144 608917 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.120206 608917 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1124 13:47:42.120361 608917 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:42.120651 608917 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.324599 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7"
I1124 13:47:42.324658 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.329752 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1124 13:47:42.329811 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1124 13:47:42.340410 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969"
I1124 13:47:42.340483 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.345994 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97"
I1124 13:47:42.346082 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.350632 608917 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7" in container runtime
I1124 13:47:42.350771 608917 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.350861 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.354889 608917 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1124 13:47:42.355021 608917 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1124 13:47:42.355078 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.365506 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f"
I1124 13:47:42.365584 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.370164 608917 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969" in container runtime
I1124 13:47:42.370246 608917 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.370299 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.371573 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.371569 608917 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97" in container runtime
I1124 13:47:42.371633 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 13:47:42.371663 608917 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.371700 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.383984 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.6.4-0" and sha "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115"
I1124 13:47:42.384064 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.391339 608917 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813"
I1124 13:47:42.391424 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.394058 608917 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f" in container runtime
I1124 13:47:42.394107 608917 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.394139 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.394173 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.394139 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.410796 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 13:47:42.412029 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.415223 608917 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115" in container runtime
I1124 13:47:42.415273 608917 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.415318 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.430558 608917 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813" in container runtime
I1124 13:47:42.430610 608917 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.430661 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:42.432115 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.432240 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.432710 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.449068 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1124 13:47:42.451309 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.451333 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.451434 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1124 13:47:42.471426 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1124 13:47:42.471426 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.472006 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1124 13:47:42.507575 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1124 13:47:42.507696 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1124 13:47:42.507737 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.507752 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.507776 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1124 13:47:42.507812 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1124 13:47:42.512031 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1124 13:47:42.512160 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1124 13:47:42.512183 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1124 13:47:42.512220 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1124 13:47:42.512281 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1124 13:47:42.542249 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1124 13:47:42.542293 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (25966080 bytes)
I1124 13:47:42.542356 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1124 13:47:42.542419 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1124 13:47:42.542436 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1124 13:47:42.542450 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1124 13:47:42.542460 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (22394368 bytes)
I1124 13:47:42.542482 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1124 13:47:42.542522 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1124 13:47:42.542541 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (27073024 bytes)
I1124 13:47:42.547506 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1124 13:47:42.547609 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1124 13:47:42.591222 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1124 13:47:42.591265 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1124 13:47:42.591339 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1124 13:47:42.591358 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1124 13:47:42.630891 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1124 13:47:42.630960 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (22831104 bytes)
I1124 13:47:42.635881 608917 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1124 13:47:42.635984 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1124 13:47:42.696822 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1124 13:47:42.696868 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (17396736 bytes)
I1124 13:47:42.696964 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1124 13:47:42.696987 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (74320896 bytes)
I1124 13:47:42.855586 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
I1124 13:47:43.017613 608917 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1124 13:47:43.017692 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1124 13:47:43.363331 608917 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1124 13:47:43.363429 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:44.322473 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.304751727s)
I1124 13:47:44.322506 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1124 13:47:44.322534 608917 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1124 13:47:44.322535 608917 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1124 13:47:44.322572 608917 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:44.322581 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
I1124 13:47:44.322611 608917 ssh_runner.go:195] Run: which crictl
I1124 13:47:44.327186 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:42.765072 607669 out.go:252] - Generating certificates and keys ...
I1124 13:47:42.765189 607669 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 13:47:42.765429 607669 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 13:47:42.918631 607669 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 13:47:43.145530 607669 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 13:47:43.262863 607669 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 13:47:43.516853 607669 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 13:47:43.680193 607669 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 13:47:43.680382 607669 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost old-k8s-version-513442] and IPs [192.168.94.2 127.0.0.1 ::1]
I1124 13:47:43.927450 607669 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 13:47:43.927668 607669 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost old-k8s-version-513442] and IPs [192.168.94.2 127.0.0.1 ::1]
I1124 13:47:44.210866 607669 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 13:47:44.444469 607669 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 13:47:44.571652 607669 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 13:47:44.571791 607669 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 13:47:44.658495 607669 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 13:47:44.899827 607669 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 13:47:45.259836 607669 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 13:47:45.407067 607669 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 13:47:45.407645 607669 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 13:47:45.412109 607669 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 13:47:42.868629 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:47:45.407011 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.084400483s)
I1124 13:47:45.407048 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1124 13:47:45.407074 608917 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1124 13:47:45.407121 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1124 13:47:45.407011 608917 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.079785919s)
I1124 13:47:45.407225 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:46.754417 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.347254819s)
I1124 13:47:46.754464 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1124 13:47:46.754487 608917 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1124 13:47:46.754539 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1124 13:47:46.754423 608917 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.34716741s)
I1124 13:47:46.754625 608917 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:47:46.791381 608917 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1124 13:47:46.791500 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1124 13:47:48.250258 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1: (1.49567347s)
I1124 13:47:48.250293 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1124 13:47:48.250322 608917 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1124 13:47:48.250369 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1124 13:47:48.250393 608917 ssh_runner.go:235] Completed: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: (1.458859359s)
I1124 13:47:48.250436 608917 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1124 13:47:48.250458 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
I1124 13:47:49.525346 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (1.274952475s)
I1124 13:47:49.525372 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1124 13:47:49.525397 608917 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1124 13:47:49.525432 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1124 13:47:45.413783 607669 out.go:252] - Booting up control plane ...
I1124 13:47:45.414000 607669 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 13:47:45.414122 607669 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 13:47:45.415606 607669 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 13:47:45.433197 607669 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 13:47:45.434777 607669 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 13:47:45.434850 607669 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 13:47:45.555124 607669 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1124 13:47:47.870054 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1124 13:47:47.870131 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:47:47.870207 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:47:47.909612 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:47:47.909637 572647 cri.go:89] found id: "6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:47.909644 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:47:47.909649 572647 cri.go:89] found id: ""
I1124 13:47:47.909660 572647 logs.go:282] 3 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:47:47.909721 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:47.915163 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:47.920826 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:47.926251 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:47:47.926326 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:47:47.968362 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:47.968399 572647 cri.go:89] found id: ""
I1124 13:47:47.968412 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:47:47.968487 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:47.973840 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:47:47.973955 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:47:48.011120 572647 cri.go:89] found id: ""
I1124 13:47:48.011151 572647 logs.go:282] 0 containers: []
W1124 13:47:48.011163 572647 logs.go:284] No container was found matching "coredns"
I1124 13:47:48.011172 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:47:48.011242 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:47:48.049409 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:48.049433 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:48.049439 572647 cri.go:89] found id: ""
I1124 13:47:48.049449 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:47:48.049612 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:48.055041 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:48.061717 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:47:48.061795 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:47:48.098008 572647 cri.go:89] found id: ""
I1124 13:47:48.098036 572647 logs.go:282] 0 containers: []
W1124 13:47:48.098048 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:47:48.098056 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:47:48.098116 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:47:48.134832 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:47:48.134858 572647 cri.go:89] found id: "daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:48.134864 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:48.134868 572647 cri.go:89] found id: ""
I1124 13:47:48.134879 572647 logs.go:282] 3 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:47:48.134960 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:48.140512 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:48.146067 572647 ssh_runner.go:195] Run: which crictl
I1124 13:47:48.151167 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:47:48.151293 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:47:48.194241 572647 cri.go:89] found id: ""
I1124 13:47:48.194275 572647 logs.go:282] 0 containers: []
W1124 13:47:48.194287 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:47:48.194297 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:47:48.194366 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:47:48.235586 572647 cri.go:89] found id: ""
I1124 13:47:48.235617 572647 logs.go:282] 0 containers: []
W1124 13:47:48.235629 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:47:48.235644 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:47:48.235660 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:47:48.322131 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:47:48.322175 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:47:48.358925 572647 logs.go:123] Gathering logs for kube-controller-manager [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf] ...
I1124 13:47:48.358964 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:47:48.399403 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:47:48.399439 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:47:48.442576 572647 logs.go:123] Gathering logs for kube-apiserver [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8] ...
I1124 13:47:48.442621 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:47:48.490297 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:47:48.490336 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:47:48.543239 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:47:48.543277 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:47:48.591561 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:47:48.591604 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:47:48.639975 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:47:48.640012 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:47:48.703335 572647 logs.go:123] Gathering logs for container status ...
I1124 13:47:48.703393 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:47:48.760778 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:47:48.760820 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:47:48.887283 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:47:48.887328 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:47:48.915138 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:47:48.915177 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1124 13:47:50.557442 607669 kubeadm.go:319] [apiclient] All control plane components are healthy after 5.002632 seconds
I1124 13:47:50.557627 607669 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 13:47:50.572390 607669 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 13:47:51.098533 607669 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 13:47:51.098764 607669 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-513442 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 13:47:51.610053 607669 kubeadm.go:319] [bootstrap-token] Using token: eki30b.4i7191y9601t9kqb
I1124 13:47:51.611988 607669 out.go:252] - Configuring RBAC rules ...
I1124 13:47:51.612142 607669 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 13:47:51.618056 607669 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 13:47:51.627751 607669 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 13:47:51.631902 607669 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 13:47:51.635666 607669 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 13:47:51.643042 607669 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 13:47:51.655046 607669 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 13:47:51.879254 607669 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 13:47:52.022857 607669 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 13:47:52.024273 607669 kubeadm.go:319]
I1124 13:47:52.024439 607669 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 13:47:52.024451 607669 kubeadm.go:319]
I1124 13:47:52.024565 607669 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 13:47:52.024593 607669 kubeadm.go:319]
I1124 13:47:52.024628 607669 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 13:47:52.024712 607669 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 13:47:52.024786 607669 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 13:47:52.024795 607669 kubeadm.go:319]
I1124 13:47:52.024870 607669 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 13:47:52.024880 607669 kubeadm.go:319]
I1124 13:47:52.024984 607669 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 13:47:52.024995 607669 kubeadm.go:319]
I1124 13:47:52.025066 607669 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 13:47:52.025175 607669 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 13:47:52.025273 607669 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 13:47:52.025282 607669 kubeadm.go:319]
I1124 13:47:52.025399 607669 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 13:47:52.025508 607669 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 13:47:52.025517 607669 kubeadm.go:319]
I1124 13:47:52.025633 607669 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token eki30b.4i7191y9601t9kqb \
I1124 13:47:52.025782 607669 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:32fb1839a00503b33822b75b81c2f42d5061d18404c0a5cd12189dec7e20658c \
I1124 13:47:52.025814 607669 kubeadm.go:319] --control-plane
I1124 13:47:52.025823 607669 kubeadm.go:319]
I1124 13:47:52.025955 607669 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 13:47:52.025964 607669 kubeadm.go:319]
I1124 13:47:52.026081 607669 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token eki30b.4i7191y9601t9kqb \
I1124 13:47:52.026226 607669 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:32fb1839a00503b33822b75b81c2f42d5061d18404c0a5cd12189dec7e20658c
I1124 13:47:52.029215 607669 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1124 13:47:52.029395 607669 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1124 13:47:52.029436 607669 cni.go:84] Creating CNI manager for ""
I1124 13:47:52.029450 607669 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:47:52.032075 607669 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 13:47:52.378094 608917 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (2.852631537s)
I1124 13:47:52.378131 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 from cache
I1124 13:47:52.378164 608917 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1124 13:47:52.378216 608917 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1124 13:47:52.826755 608917 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21932-370498/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1124 13:47:52.826808 608917 cache_images.go:125] Successfully loaded all cached images
I1124 13:47:52.826816 608917 cache_images.go:94] duration metric: took 10.70919772s to LoadCachedImages
I1124 13:47:52.826831 608917 kubeadm.go:935] updating node { 192.168.103.2 8443 v1.34.1 containerd true true} ...
I1124 13:47:52.826984 608917 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-608395 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-608395 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1124 13:47:52.827057 608917 ssh_runner.go:195] Run: sudo crictl info
I1124 13:47:52.858503 608917 cni.go:84] Creating CNI manager for ""
I1124 13:47:52.858531 608917 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:47:52.858557 608917 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1124 13:47:52.858588  608917 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.103.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-608395 NodeName:no-preload-608395 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.103.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.103.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1124 13:47:52.858757 608917 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.103.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  name: "no-preload-608395"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.103.2"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.103.2"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1124 13:47:52.858835 608917 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1124 13:47:52.869416 608917 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1124 13:47:52.869483 608917 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1124 13:47:52.881260 608917 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm.sha256 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/linux/amd64/v1.34.1/kubeadm
I1124 13:47:52.881274 608917 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
I1124 13:47:52.881284 608917 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet.sha256 -> /home/jenkins/minikube-integration/21932-370498/.minikube/cache/linux/amd64/v1.34.1/kubelet
I1124 13:47:52.881370 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1124 13:47:52.886648 608917 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1124 13:47:52.886683 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/linux/amd64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (60559544 bytes)
I1124 13:47:53.829310 608917 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 13:47:53.844364 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1124 13:47:53.848663 608917 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1124 13:47:53.848703 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/linux/amd64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (59195684 bytes)
I1124 13:47:54.078871 608917 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1124 13:47:54.083904 608917 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1124 13:47:54.083971 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/cache/linux/amd64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (74027192 bytes)
I1124 13:47:54.263727 608917 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1124 13:47:54.272819 608917 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (322 bytes)
I1124 13:47:54.287533 608917 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1124 13:47:54.307319 608917 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2232 bytes)
I1124 13:47:54.321728 608917 ssh_runner.go:195] Run: grep 192.168.103.2 control-plane.minikube.internal$ /etc/hosts
I1124 13:47:54.326108 608917 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.103.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1124 13:47:54.337568 608917 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:47:54.423252 608917 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 13:47:54.446892 608917 certs.go:69] Setting up /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395 for IP: 192.168.103.2
I1124 13:47:54.446932 608917 certs.go:195] generating shared ca certs ...
I1124 13:47:54.446950 608917 certs.go:227] acquiring lock for ca certs: {Name:mk5874497fda855b1e2ff816147ffdfbc44946ae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.447115 608917 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21932-370498/.minikube/ca.key
I1124 13:47:54.447173 608917 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.key
I1124 13:47:54.447189 608917 certs.go:257] generating profile certs ...
I1124 13:47:54.447250 608917 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.key
I1124 13:47:54.447265 608917 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.crt with IP's: []
I1124 13:47:54.480111 608917 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.crt ...
I1124 13:47:54.480143 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.crt: {Name:mk0373d89f453529126dca865f8c4273a9b76c80 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.480318 608917 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.key ...
I1124 13:47:54.480326 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/client.key: {Name:mkd4fd6c97a850045d4415dcd6682504ca05b6b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.480412 608917 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key.211f6cd0
I1124 13:47:54.480432 608917 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt.211f6cd0 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.103.2]
I1124 13:47:54.564575 608917 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt.211f6cd0 ...
I1124 13:47:54.564606 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt.211f6cd0: {Name:mk39921501aaa8b9dfdaa0c59584189fbc232834 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.564812 608917 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key.211f6cd0 ...
I1124 13:47:54.564832 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key.211f6cd0: {Name:mk1e5ec23cae444088ab39a7c9f4bd7f0b68695e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.565002 608917 certs.go:382] copying /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt.211f6cd0 -> /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt
I1124 13:47:54.565092 608917 certs.go:386] copying /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key.211f6cd0 -> /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key
I1124 13:47:54.565147 608917 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.key
I1124 13:47:54.565166 608917 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.crt with IP's: []
I1124 13:47:54.682010 608917 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.crt ...
I1124 13:47:54.682042 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.crt: {Name:mk61707e6277a856c1f1cee667479489cd8cfc56 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.682251 608917 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.key ...
I1124 13:47:54.682270 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.key: {Name:mkdc07f88aff1f58330c9757ac629acf2062c9ed Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:47:54.682520 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122.pem (1338 bytes)
W1124 13:47:54.682564 608917 certs.go:480] ignoring /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122_empty.pem, impossibly tiny 0 bytes
I1124 13:47:54.682574 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca-key.pem (1679 bytes)
I1124 13:47:54.682602 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/ca.pem (1082 bytes)
I1124 13:47:54.682626 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/cert.pem (1123 bytes)
I1124 13:47:54.682651 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/certs/key.pem (1675 bytes)
I1124 13:47:54.682697 608917 certs.go:484] found cert: /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem (1708 bytes)
I1124 13:47:54.683371 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1124 13:47:54.703387 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1124 13:47:54.722770 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1124 13:47:54.743107 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1124 13:47:54.763697 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1124 13:47:54.783164 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1124 13:47:54.802752 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1124 13:47:54.822653 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/profiles/no-preload-608395/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1124 13:47:54.843126 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/certs/374122.pem --> /usr/share/ca-certificates/374122.pem (1338 bytes)
I1124 13:47:54.867619 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/files/etc/ssl/certs/3741222.pem --> /usr/share/ca-certificates/3741222.pem (1708 bytes)
I1124 13:47:54.887814 608917 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21932-370498/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1124 13:47:54.907876 608917 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1124 13:47:54.922379 608917 ssh_runner.go:195] Run: openssl version
I1124 13:47:54.929636 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/374122.pem && ln -fs /usr/share/ca-certificates/374122.pem /etc/ssl/certs/374122.pem"
I1124 13:47:54.940237 608917 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/374122.pem
I1124 13:47:54.944856 608917 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 24 13:20 /usr/share/ca-certificates/374122.pem
I1124 13:47:54.944961 608917 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/374122.pem
I1124 13:47:54.983788 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/374122.pem /etc/ssl/certs/51391683.0"
I1124 13:47:54.994031 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/3741222.pem && ln -fs /usr/share/ca-certificates/3741222.pem /etc/ssl/certs/3741222.pem"
I1124 13:47:55.004849 608917 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/3741222.pem
I1124 13:47:55.010168 608917 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 24 13:20 /usr/share/ca-certificates/3741222.pem
I1124 13:47:55.010231 608917 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/3741222.pem
I1124 13:47:55.048930 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/3741222.pem /etc/ssl/certs/3ec20f2e.0"
I1124 13:47:55.058618 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1124 13:47:55.068496 608917 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:52.033462 607669 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 13:47:52.040052 607669 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1124 13:47:52.040080 607669 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 13:47:52.058896 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 13:47:52.863538 607669 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 13:47:52.863612 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:52.863691 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-513442 minikube.k8s.io/updated_at=2025_11_24T13_47_52_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=b5d1c9f4e75f4e638a533695fd62619949cefcab minikube.k8s.io/name=old-k8s-version-513442 minikube.k8s.io/primary=true
I1124 13:47:52.876635 607669 ops.go:34] apiserver oom_adj: -16
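The oom_adj read above (ops.go:34) is minikube confirming that the kubelet launched the apiserver with OOM-killer protection. A minimal sketch of repeating that check by hand on the node, also reading the modern oom_score_adj knob that supersedes the legacy file (both paths are standard procfs, not minikube-specific):
  cat /proc/$(pgrep kube-apiserver)/oom_adj        # legacy interface, range -17..15; -16 means strongly protected
  cat /proc/$(pgrep kube-apiserver)/oom_score_adj  # current interface, range -1000..1000
The kernel keeps the two files consistent, so either read tells the same story.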
I1124 13:47:52.948231 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:53.449196 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:53.948546 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:54.448277 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:54.949098 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:55.073505 608917 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 24 13:14 /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:55.073568 608917 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1124 13:47:55.110353 608917 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
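The <hash>.0 names created above (51391683.0, 3ec20f2e.0, b5213941.0) follow OpenSSL's c_rehash convention: each symlink in /etc/ssl/certs is named after the certificate's subject hash, which is exactly what the openssl x509 -hash -noout runs compute, so OpenSSL-based clients can find the CA by directory lookup. A minimal sketch of the same two steps done by hand, using the minikubeCA.pem path from the log:
  hash=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${hash}.0"   # yields b5213941.0 here
The test -L guard in the logged command just avoids re-creating an existing hash link.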
I1124 13:47:55.120226 608917 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1124 13:47:55.124508 608917 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1124 13:47:55.124574 608917 kubeadm.go:401] StartCluster: {Name:no-preload-608395 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-608395 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1124 13:47:55.124676 608917 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1124 13:47:55.124734 608917 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1124 13:47:55.153610 608917 cri.go:89] found id: ""
I1124 13:47:55.153686 608917 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1124 13:47:55.163237 608917 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1124 13:47:55.172281 608917 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1124 13:47:55.172352 608917 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1124 13:47:55.181432 608917 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1124 13:47:55.181458 608917 kubeadm.go:158] found existing configuration files:
I1124 13:47:55.181515 608917 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1124 13:47:55.190814 608917 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1124 13:47:55.190897 608917 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1124 13:47:55.200577 608917 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1124 13:47:55.210272 608917 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1124 13:47:55.210344 608917 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1124 13:47:55.219990 608917 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1124 13:47:55.228828 608917 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1124 13:47:55.228885 608917 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1124 13:47:55.238104 608917 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1124 13:47:55.246631 608917 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1124 13:47:55.246745 608917 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1124 13:47:55.255509 608917 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1124 13:47:55.316154 608917 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1124 13:47:55.376542 608917 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
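Both [WARNING] lines are expected under the docker driver: the kic container shares the host's 6.8.0-1044-gcp kernel but does not carry its module tree, so kubeadm's kernel-config probe (which falls back to modprobe configs when no config file is found) cannot succeed, and minikube starts kubelet explicitly (see the systemctl start kubelet calls later in this log) rather than relying on the unit being enabled. A rough by-hand reproduction of the kernel-config part of that preflight check, assuming the usual Ubuntu config locations:
  ls /boot/config-$(uname -r) /proc/config.gz 2>/dev/null || sudo modprobe configs
When neither config file exists and the module is absent, kubeadm emits exactly the SystemVerification warning above.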
I1124 13:47:55.448626 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:55.949156 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:56.449055 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:56.949140 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:57.448946 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:57.948732 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:58.448437 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:58.948803 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:59.449172 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:59.948946 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:47:59.001079 572647 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.085873793s)
W1124 13:47:59.001127 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Unable to connect to the server: net/http: TLS handshake timeout
output:
** stderr **
Unable to connect to the server: net/http: TLS handshake timeout
** /stderr **
I1124 13:47:59.001145 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:47:59.001163 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:00.448856 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:00.948957 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:01.448664 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:01.948985 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:02.448486 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:02.948890 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:03.448380 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:03.948515 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:04.448564 607669 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:04.527535 607669 kubeadm.go:1114] duration metric: took 11.66399569s to wait for elevateKubeSystemPrivileges
I1124 13:48:04.527576 607669 kubeadm.go:403] duration metric: took 22.29462596s to StartCluster
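The long run of get sa default calls above is minikube's elevateKubeSystemPrivileges wait: the default ServiceAccount only appears once the controller-manager's serviceaccount controller has synced, so the lookup is retried at roughly 500ms intervals until it succeeds. A standalone sketch of an equivalent wait loop, using the same binary path as the log (the 0.5s sleep just mirrors the observed spacing):
  until sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default \
        --kubeconfig=/var/lib/minikube/kubeconfig >/dev/null 2>&1; do
    sleep 0.5
  done
Here it converged after about 11.7s; the no-preload cluster later in this log needs only about 4.2s.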
I1124 13:48:04.527612 607669 settings.go:142] acquiring lock: {Name:mka599a3c9bae62ffb84d261186583052ce40f68 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:48:04.527702 607669 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21932-370498/kubeconfig
I1124 13:48:04.529054 607669 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/kubeconfig: {Name:mk44e8f04ffd8592063c19ad1e339ad14aaa66a2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:48:04.529299 607669 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 13:48:04.529306 607669 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.94.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 13:48:04.529383 607669 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 13:48:04.529498 607669 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-513442"
I1124 13:48:04.529517 607669 config.go:182] Loaded profile config "old-k8s-version-513442": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1124 13:48:04.529519 607669 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-513442"
I1124 13:48:04.529535 607669 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-513442"
I1124 13:48:04.529561 607669 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-513442"
I1124 13:48:04.529641 607669 host.go:66] Checking if "old-k8s-version-513442" exists ...
I1124 13:48:04.529946 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:48:04.530180 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:48:04.531152 607669 out.go:179] * Verifying Kubernetes components...
I1124 13:48:04.532717 607669 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:48:04.557008 607669 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:48:04.558405 607669 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 13:48:04.558429 607669 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 13:48:04.558495 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:48:04.562314 607669 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-513442"
I1124 13:48:04.562381 607669 host.go:66] Checking if "old-k8s-version-513442" exists ...
I1124 13:48:04.563175 607669 cli_runner.go:164] Run: docker container inspect old-k8s-version-513442 --format={{.State.Status}}
I1124 13:48:04.584062 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:48:04.598587 607669 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 13:48:04.598613 607669 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 13:48:04.598683 607669 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-513442
I1124 13:48:04.628606 607669 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33435 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/old-k8s-version-513442/id_rsa Username:docker}
I1124 13:48:04.653771 607669 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.94.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 13:48:04.701037 607669 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 13:48:04.714197 607669 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 13:48:04.765729 607669 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 13:48:04.912320 607669 start.go:977] {"host.minikube.internal": 192.168.94.1} host record injected into CoreDNS's ConfigMap
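The sed pipeline at 13:48:04.653771 is what produced this injection: it splices a hosts block in front of CoreDNS's forward plugin so that host.minikube.internal resolves to the network gateway (and also inserts a log directive ahead of errors). After the replace, the relevant Corefile fragment looks roughly like this (reconstructed from the sed expressions; exact indentation may differ):
  hosts {
     192.168.94.1 host.minikube.internal
     fallthrough
  }
  forward . /etc/resolv.conf
fallthrough keeps every other name flowing on to the forward plugin as before.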
I1124 13:48:04.913621 607669 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-513442" to be "Ready" ...
I1124 13:48:05.136398 607669 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1124 13:48:05.160590 608917 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1124 13:48:05.160664 608917 kubeadm.go:319] [preflight] Running pre-flight checks
I1124 13:48:05.160771 608917 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1124 13:48:05.160854 608917 kubeadm.go:319] KERNEL_VERSION: 6.8.0-1044-gcp
I1124 13:48:05.160886 608917 kubeadm.go:319] OS: Linux
I1124 13:48:05.160993 608917 kubeadm.go:319] CGROUPS_CPU: enabled
I1124 13:48:05.161038 608917 kubeadm.go:319] CGROUPS_CPUSET: enabled
I1124 13:48:05.161128 608917 kubeadm.go:319] CGROUPS_DEVICES: enabled
I1124 13:48:05.161215 608917 kubeadm.go:319] CGROUPS_FREEZER: enabled
I1124 13:48:05.161290 608917 kubeadm.go:319] CGROUPS_MEMORY: enabled
I1124 13:48:05.161348 608917 kubeadm.go:319] CGROUPS_PIDS: enabled
I1124 13:48:05.161407 608917 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I1124 13:48:05.161478 608917 kubeadm.go:319] CGROUPS_IO: enabled
I1124 13:48:05.161607 608917 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1124 13:48:05.161758 608917 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1124 13:48:05.161894 608917 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1124 13:48:05.162009 608917 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1124 13:48:05.163691 608917 out.go:252] - Generating certificates and keys ...
I1124 13:48:05.163805 608917 kubeadm.go:319] [certs] Using existing ca certificate authority
I1124 13:48:05.163947 608917 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1124 13:48:05.164054 608917 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1124 13:48:05.164154 608917 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1124 13:48:05.164250 608917 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1124 13:48:05.164325 608917 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1124 13:48:05.164403 608917 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1124 13:48:05.164579 608917 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-608395] and IPs [192.168.103.2 127.0.0.1 ::1]
I1124 13:48:05.164662 608917 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1124 13:48:05.164844 608917 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-608395] and IPs [192.168.103.2 127.0.0.1 ::1]
I1124 13:48:05.164993 608917 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1124 13:48:05.165088 608917 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1124 13:48:05.165130 608917 kubeadm.go:319] [certs] Generating "sa" key and public key
I1124 13:48:05.165182 608917 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1124 13:48:05.165250 608917 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1124 13:48:05.165313 608917 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1124 13:48:05.165382 608917 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1124 13:48:05.165456 608917 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1124 13:48:05.165506 608917 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1124 13:48:05.165580 608917 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1124 13:48:05.165637 608917 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1124 13:48:05.167858 608917 out.go:252] - Booting up control plane ...
I1124 13:48:05.167962 608917 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1124 13:48:05.168043 608917 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1124 13:48:05.168104 608917 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1124 13:48:05.168199 608917 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1124 13:48:05.168298 608917 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1124 13:48:05.168436 608917 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1124 13:48:05.168514 608917 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1124 13:48:05.168558 608917 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1124 13:48:05.168715 608917 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1124 13:48:05.168854 608917 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1124 13:48:05.168953 608917 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.001985013s
I1124 13:48:05.169093 608917 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1124 13:48:05.169202 608917 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.103.2:8443/livez
I1124 13:48:05.169339 608917 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1124 13:48:05.169461 608917 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1124 13:48:05.169582 608917 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 2.171045551s
I1124 13:48:05.169691 608917 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 2.746683308s
I1124 13:48:05.169782 608917 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 5.002983514s
I1124 13:48:05.169958 608917 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1124 13:48:05.170079 608917 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1124 13:48:05.170136 608917 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1124 13:48:05.170449 608917 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-608395 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1124 13:48:05.170534 608917 kubeadm.go:319] [bootstrap-token] Using token: 0m3tk6.bp5t9g266aj6zg5e
I1124 13:48:05.172344 608917 out.go:252] - Configuring RBAC rules ...
I1124 13:48:05.172497 608917 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1124 13:48:05.172606 608917 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1124 13:48:05.172790 608917 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1124 13:48:05.172947 608917 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1124 13:48:05.173067 608917 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1124 13:48:05.173152 608917 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1124 13:48:05.173251 608917 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1124 13:48:05.173290 608917 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1124 13:48:05.173330 608917 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1124 13:48:05.173336 608917 kubeadm.go:319]
I1124 13:48:05.173391 608917 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1124 13:48:05.173397 608917 kubeadm.go:319]
I1124 13:48:05.173470 608917 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1124 13:48:05.173476 608917 kubeadm.go:319]
I1124 13:48:05.173498 608917 kubeadm.go:319] mkdir -p $HOME/.kube
I1124 13:48:05.173553 608917 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1124 13:48:05.173610 608917 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1124 13:48:05.173623 608917 kubeadm.go:319]
I1124 13:48:05.173669 608917 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1124 13:48:05.173675 608917 kubeadm.go:319]
I1124 13:48:05.173718 608917 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1124 13:48:05.173727 608917 kubeadm.go:319]
I1124 13:48:05.173778 608917 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1124 13:48:05.173858 608917 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1124 13:48:05.173981 608917 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1124 13:48:05.173990 608917 kubeadm.go:319]
I1124 13:48:05.174085 608917 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1124 13:48:05.174165 608917 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1124 13:48:05.174170 608917 kubeadm.go:319]
I1124 13:48:05.174250 608917 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 0m3tk6.bp5t9g266aj6zg5e \
I1124 13:48:05.174352 608917 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:32fb1839a00503b33822b75b81c2f42d5061d18404c0a5cd12189dec7e20658c \
I1124 13:48:05.174376 608917 kubeadm.go:319] --control-plane
I1124 13:48:05.174381 608917 kubeadm.go:319]
I1124 13:48:05.174459 608917 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1124 13:48:05.174465 608917 kubeadm.go:319]
I1124 13:48:05.174560 608917 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 0m3tk6.bp5t9g266aj6zg5e \
I1124 13:48:05.174802 608917 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:32fb1839a00503b33822b75b81c2f42d5061d18404c0a5cd12189dec7e20658c
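The --discovery-token-ca-cert-hash printed by kubeadm is a SHA-256 digest of the cluster CA's DER-encoded public key, not of the certificate file itself. It can be recomputed with the standard kubeadm recipe; the path below is the certificateDir reported at 13:48:05.162009, and the openssl rsa step assumes an RSA CA key:
  openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
    | openssl rsa -pubin -outform der 2>/dev/null \
    | openssl dgst -sha256 -hex | sed 's/^.* //'
The output should match the 32fb1839... value in both join commands above.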
I1124 13:48:05.174826 608917 cni.go:84] Creating CNI manager for ""
I1124 13:48:05.174836 608917 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1124 13:48:05.177484 608917 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1124 13:48:05.137677 607669 addons.go:530] duration metric: took 608.290782ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1124 13:48:01.553682 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:02.346718 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": read tcp 192.168.76.1:51122->192.168.76.2:8443: read: connection reset by peer
I1124 13:48:02.346797 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:02.346868 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:02.379430 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:02.379461 572647 cri.go:89] found id: "6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:48:02.379468 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:02.379472 572647 cri.go:89] found id: ""
I1124 13:48:02.379481 572647 logs.go:282] 3 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:02.379554 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.384666 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.389028 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.393413 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:02.393493 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:02.423298 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:02.423317 572647 cri.go:89] found id: ""
I1124 13:48:02.423325 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:02.423377 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.428323 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:02.428396 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:02.458971 572647 cri.go:89] found id: ""
I1124 13:48:02.459002 572647 logs.go:282] 0 containers: []
W1124 13:48:02.459014 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:02.459023 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:02.459136 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:02.495221 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:02.495253 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:02.495258 572647 cri.go:89] found id: ""
I1124 13:48:02.495267 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:02.495325 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.504536 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.513709 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:02.513782 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:02.545556 572647 cri.go:89] found id: ""
I1124 13:48:02.545589 572647 logs.go:282] 0 containers: []
W1124 13:48:02.545603 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:02.545613 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:02.545686 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:02.575683 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:02.575710 572647 cri.go:89] found id: "daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:48:02.575714 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:02.575717 572647 cri.go:89] found id: ""
I1124 13:48:02.575725 572647 logs.go:282] 3 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:02.575799 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.580340 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.584784 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:02.588717 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:02.588774 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:02.617522 572647 cri.go:89] found id: ""
I1124 13:48:02.617550 572647 logs.go:282] 0 containers: []
W1124 13:48:02.617558 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:02.617567 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:02.617616 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:02.647375 572647 cri.go:89] found id: ""
I1124 13:48:02.647407 572647 logs.go:282] 0 containers: []
W1124 13:48:02.647418 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:02.647432 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:02.647445 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:02.685850 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:02.685900 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:02.794118 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:02.794164 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:02.866960 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:02.866982 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:02.866997 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:02.908627 572647 logs.go:123] Gathering logs for kube-apiserver [6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8] ...
I1124 13:48:02.908671 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6ba099dbfe03c53cb7a40393cab6635322c5372979bf7ba6869730b7b76a01e8"
I1124 13:48:02.949348 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:02.949380 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:02.997498 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:02.997541 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:03.065816 572647 logs.go:123] Gathering logs for kube-controller-manager [daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf] ...
I1124 13:48:03.065856 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 daace7b3ca5876bbcd7819611db0917a66e6e74f443673d2d192e8840d66bcbf"
I1124 13:48:03.101360 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:03.101393 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:03.140140 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:03.140183 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:03.160020 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:03.160058 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:03.202092 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:03.202136 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:03.247020 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:03.247060 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:03.283475 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:03.283518 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:05.832996 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:05.833478 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:05.833543 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:05.833607 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:05.862229 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:05.862254 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:05.862258 572647 cri.go:89] found id: ""
I1124 13:48:05.862267 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:05.862320 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:05.867091 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:05.871378 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:05.871455 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:05.900338 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:05.900361 572647 cri.go:89] found id: ""
I1124 13:48:05.900370 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:05.900428 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:05.904531 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:05.904606 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:05.933536 572647 cri.go:89] found id: ""
I1124 13:48:05.933565 572647 logs.go:282] 0 containers: []
W1124 13:48:05.933579 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:05.933587 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:05.933645 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:05.961942 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:05.961966 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:05.961980 572647 cri.go:89] found id: ""
I1124 13:48:05.961988 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:05.962048 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:05.966413 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:05.970560 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:05.970640 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:05.999021 572647 cri.go:89] found id: ""
I1124 13:48:05.999046 572647 logs.go:282] 0 containers: []
W1124 13:48:05.999057 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:05.999065 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:05.999125 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:06.030192 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:06.030216 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:06.030222 572647 cri.go:89] found id: ""
I1124 13:48:06.030233 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:06.030291 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:06.034509 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:06.038518 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:06.038602 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:06.067432 572647 cri.go:89] found id: ""
I1124 13:48:06.067459 572647 logs.go:282] 0 containers: []
W1124 13:48:06.067469 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:06.067477 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:06.067557 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:06.098683 572647 cri.go:89] found id: ""
I1124 13:48:06.098712 572647 logs.go:282] 0 containers: []
W1124 13:48:06.098723 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:06.098736 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:06.098753 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:06.163737 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:06.163765 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:06.163783 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:05.179143 608917 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1124 13:48:05.184780 608917 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1124 13:48:05.184802 608917 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1124 13:48:05.199547 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1124 13:48:05.451312 608917 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1124 13:48:05.451481 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:05.451599 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-608395 minikube.k8s.io/updated_at=2025_11_24T13_48_05_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=b5d1c9f4e75f4e638a533695fd62619949cefcab minikube.k8s.io/name=no-preload-608395 minikube.k8s.io/primary=true
I1124 13:48:05.479434 608917 ops.go:34] apiserver oom_adj: -16
I1124 13:48:05.560179 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:06.061204 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:06.560802 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:07.061219 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:07.561139 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:08.061015 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:08.561034 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:09.061268 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:09.560397 608917 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1124 13:48:09.636185 608917 kubeadm.go:1114] duration metric: took 4.184744627s to wait for elevateKubeSystemPrivileges
I1124 13:48:09.636235 608917 kubeadm.go:403] duration metric: took 14.511667218s to StartCluster
I1124 13:48:09.636257 608917 settings.go:142] acquiring lock: {Name:mka599a3c9bae62ffb84d261186583052ce40f68 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:48:09.636332 608917 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21932-370498/kubeconfig
I1124 13:48:09.637980 608917 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21932-370498/kubeconfig: {Name:mk44e8f04ffd8592063c19ad1e339ad14aaa66a2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1124 13:48:09.638233 608917 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1124 13:48:09.638262 608917 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1124 13:48:09.638340 608917 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1124 13:48:09.638439 608917 addons.go:70] Setting storage-provisioner=true in profile "no-preload-608395"
I1124 13:48:09.638460 608917 addons.go:239] Setting addon storage-provisioner=true in "no-preload-608395"
I1124 13:48:09.638459 608917 addons.go:70] Setting default-storageclass=true in profile "no-preload-608395"
I1124 13:48:09.638486 608917 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-608395"
I1124 13:48:09.638512 608917 host.go:66] Checking if "no-preload-608395" exists ...
I1124 13:48:09.638608 608917 config.go:182] Loaded profile config "no-preload-608395": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1124 13:48:09.638889 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:48:09.639090 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:48:09.640719 608917 out.go:179] * Verifying Kubernetes components...
I1124 13:48:09.642235 608917 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1124 13:48:09.665980 608917 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1124 13:48:09.668239 608917 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1124 13:48:09.668262 608917 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1124 13:48:09.668334 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:48:09.668545 608917 addons.go:239] Setting addon default-storageclass=true in "no-preload-608395"
I1124 13:48:09.668594 608917 host.go:66] Checking if "no-preload-608395" exists ...
I1124 13:48:09.669115 608917 cli_runner.go:164] Run: docker container inspect no-preload-608395 --format={{.State.Status}}
I1124 13:48:09.708052 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:48:09.711213 608917 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1124 13:48:09.711236 608917 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1124 13:48:09.711297 608917 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-608395
I1124 13:48:09.737250 608917 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33441 SSHKeyPath:/home/jenkins/minikube-integration/21932-370498/.minikube/machines/no-preload-608395/id_rsa Username:docker}
I1124 13:48:09.745340 608917 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1124 13:48:09.808489 608917 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1124 13:48:09.832661 608917 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1124 13:48:09.863280 608917 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1124 13:48:09.941101 608917 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
I1124 13:48:09.942521 608917 node_ready.go:35] waiting up to 6m0s for node "no-preload-608395" to be "Ready" ...
I1124 13:48:10.163475 608917 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
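The one-liner at 13:48:09.745340 patches the coredns ConfigMap in place: sed inserts a hosts block ahead of the existing "forward . /etc/resolv.conf" directive and a "log" directive ahead of "errors", then pipes the result back through kubectl replace, which produces the "host record injected" line above. Reconstructed from those sed expressions alone, the patched Corefile fragment should look roughly like this (the elided middle directives are assumed from a stock kubeadm Corefile):

.:53 {
    log                      # inserted before the existing errors line
    errors
    ...                      # health, ready, kubernetes, prometheus as shipped
    hosts {
       192.168.103.1 host.minikube.internal
       fallthrough
    }
    forward . /etc/resolv.conf
    ...                      # cache, loop, reload, loadbalance as shipped
}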
I1124 13:48:05.418106 607669 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-513442" context rescaled to 1 replicas
W1124 13:48:06.917478 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
W1124 13:48:09.417409 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
I1124 13:48:06.199640 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:06.199675 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:06.235793 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:06.235827 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:06.290172 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:06.290212 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:06.325935 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:06.325975 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:06.359485 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:06.359523 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:06.406787 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:06.406834 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:06.503206 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:06.503251 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:06.520877 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:06.520924 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:06.561472 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:06.561510 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:06.591722 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:06.591748 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:09.128043 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:09.128549 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:09.128609 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:09.128678 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:09.158194 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:09.158216 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:09.158220 572647 cri.go:89] found id: ""
I1124 13:48:09.158229 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:09.158308 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.162575 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.167402 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:09.167472 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:09.196608 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:09.196633 572647 cri.go:89] found id: ""
I1124 13:48:09.196645 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:09.196709 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.201107 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:09.201190 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:09.232265 572647 cri.go:89] found id: ""
I1124 13:48:09.232300 572647 logs.go:282] 0 containers: []
W1124 13:48:09.232311 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:09.232320 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:09.232386 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:09.272990 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:09.273017 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:09.273022 572647 cri.go:89] found id: ""
I1124 13:48:09.273033 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:09.273100 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.278614 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.283409 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:09.283485 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:09.314562 572647 cri.go:89] found id: ""
I1124 13:48:09.314592 572647 logs.go:282] 0 containers: []
W1124 13:48:09.314604 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:09.314611 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:09.314682 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:09.346903 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:09.346963 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:09.346970 572647 cri.go:89] found id: ""
I1124 13:48:09.346979 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:09.347049 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.351444 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:09.355601 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:09.355675 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:09.387667 572647 cri.go:89] found id: ""
I1124 13:48:09.387697 572647 logs.go:282] 0 containers: []
W1124 13:48:09.387709 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:09.387716 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:09.387779 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:09.417828 572647 cri.go:89] found id: ""
I1124 13:48:09.417854 572647 logs.go:282] 0 containers: []
W1124 13:48:09.417863 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:09.417876 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:09.417894 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:09.518663 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:09.518707 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:09.538049 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:09.538093 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:09.606209 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:09.606232 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:09.606246 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:09.646703 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:09.646736 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:09.708037 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:09.708078 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:09.779698 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:09.779735 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:09.819613 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:09.819663 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:09.867349 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:09.867388 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:09.917580 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:09.917620 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:09.959751 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:09.959793 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:10.006236 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:10.006274 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
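Every cri.go:54 / logs.go:282 pair in the burst above follows one pattern: run "sudo crictl ps -a --quiet --name=<component>" on the node, split stdout into container IDs, then fetch each container's logs with "crictl logs --tail 400 <id>". A minimal local sketch of the enumeration half in Go (listContainers is a hypothetical helper, not minikube's API; it assumes crictl on PATH and passwordless sudo):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// listContainers returns the IDs of all containers (any state) whose name
// matches the given component, mirroring the cri.go pattern above.
func listContainers(component string) ([]string, error) {
	out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet", "--name="+component).Output()
	if err != nil {
		return nil, err
	}
	// --quiet prints one container ID per line; drop empty lines so a
	// component with no containers yields an empty slice ("0 containers: []").
	var ids []string
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		if line != "" {
			ids = append(ids, line)
		}
	}
	return ids, nil
}

func main() {
	components := []string{"kube-apiserver", "etcd", "coredns", "kube-scheduler",
		"kube-proxy", "kube-controller-manager", "kindnet", "storage-provisioner"}
	for _, c := range components {
		ids, err := listContainers(c)
		if err != nil {
			fmt.Printf("%s: error: %v\n", c, err)
			continue
		}
		fmt.Printf("%d containers for %q: %v\n", len(ids), c, ids)
	}
}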
I1124 13:48:10.165110 608917 addons.go:530] duration metric: took 526.764143ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1124 13:48:10.444998 608917 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-608395" context rescaled to 1 replicas
W1124 13:48:11.948043 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
W1124 13:48:14.445721 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
W1124 13:48:11.417485 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
W1124 13:48:13.418201 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
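The node_ready.go lines above and below are a poll loop over the node's Ready condition: fetch the Node object, look for NodeReady=True, and log a retry warning otherwise. A rough client-go equivalent, assuming a reachable kubeconfig (the path below is illustrative, and the fixed 2s interval stands in for the harness's actual backoff):

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitNodeReady polls the node's conditions until NodeReady is True,
// mirroring the node_ready.go loop in the log.
func waitNodeReady(cs *kubernetes.Clientset, name string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		node, err := cs.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
		if err == nil {
			for _, cond := range node.Status.Conditions {
				if cond.Type == corev1.NodeReady && cond.Status == corev1.ConditionTrue {
					return nil
				}
			}
			fmt.Printf("node %q has \"Ready\":\"False\" status (will retry)\n", name)
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("node %q not Ready within %v", name, timeout)
}

func main() {
	// Kubeconfig path is illustrative, not the harness's actual location.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(waitNodeReady(cs, "no-preload-608395", 6*time.Minute))
}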
I1124 13:48:12.563487 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:12.564031 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:12.564091 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:12.564151 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:12.598524 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:12.598553 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:12.598559 572647 cri.go:89] found id: ""
I1124 13:48:12.598570 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:12.598654 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.603466 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.608383 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:12.608462 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:12.652395 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:12.652422 572647 cri.go:89] found id: ""
I1124 13:48:12.652433 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:12.652503 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.657966 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:12.658060 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:12.693432 572647 cri.go:89] found id: ""
I1124 13:48:12.693468 572647 logs.go:282] 0 containers: []
W1124 13:48:12.693480 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:12.693489 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:12.693558 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:12.731546 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:12.731572 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:12.731579 572647 cri.go:89] found id: ""
I1124 13:48:12.731590 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:12.731820 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.737055 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.741859 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:12.741953 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:12.776627 572647 cri.go:89] found id: ""
I1124 13:48:12.776652 572647 logs.go:282] 0 containers: []
W1124 13:48:12.776660 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:12.776667 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:12.776735 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:12.809077 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:12.809099 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:12.809102 572647 cri.go:89] found id: ""
I1124 13:48:12.809112 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:12.809166 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.813963 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:12.818488 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:12.818563 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:12.852844 572647 cri.go:89] found id: ""
I1124 13:48:12.852879 572647 logs.go:282] 0 containers: []
W1124 13:48:12.852891 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:12.852900 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:12.853034 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:12.889177 572647 cri.go:89] found id: ""
I1124 13:48:12.889228 572647 logs.go:282] 0 containers: []
W1124 13:48:12.889240 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:12.889255 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:12.889278 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:12.941108 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:12.941146 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:13.012950 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:13.012998 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:13.059324 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:13.059367 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:13.096188 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:13.096235 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:13.157287 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:13.157338 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:13.198203 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:13.198250 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:13.219729 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:13.219773 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:13.293315 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:13.293338 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:13.293356 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:13.338975 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:13.339029 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:13.385546 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:13.385596 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:13.427130 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:13.427162 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:16.027717 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:16.028251 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:16.028310 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:16.028363 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:16.058811 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:16.058839 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:16.058847 572647 cri.go:89] found id: ""
I1124 13:48:16.058858 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:16.058999 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.063797 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.068208 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:16.068282 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:16.097374 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:16.097404 572647 cri.go:89] found id: ""
I1124 13:48:16.097416 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:16.097484 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.102967 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:16.103045 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:16.133626 572647 cri.go:89] found id: ""
I1124 13:48:16.133660 572647 logs.go:282] 0 containers: []
W1124 13:48:16.133670 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:16.133676 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:16.133746 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:16.165392 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:16.165424 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:16.165431 572647 cri.go:89] found id: ""
I1124 13:48:16.165442 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:16.165507 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.170277 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.174579 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:16.174661 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
W1124 13:48:16.445831 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
W1124 13:48:18.945868 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
W1124 13:48:15.917184 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
W1124 13:48:17.917526 607669 node_ready.go:57] node "old-k8s-version-513442" has "Ready":"False" status (will retry)
I1124 13:48:19.416721 607669 node_ready.go:49] node "old-k8s-version-513442" is "Ready"
I1124 13:48:19.416760 607669 node_ready.go:38] duration metric: took 14.503103561s for node "old-k8s-version-513442" to be "Ready" ...
I1124 13:48:19.416778 607669 api_server.go:52] waiting for apiserver process to appear ...
I1124 13:48:19.416833 607669 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 13:48:19.430267 607669 api_server.go:72] duration metric: took 14.90093273s to wait for apiserver process to appear ...
I1124 13:48:19.430299 607669 api_server.go:88] waiting for apiserver healthz status ...
I1124 13:48:19.430326 607669 api_server.go:253] Checking apiserver healthz at https://192.168.94.2:8443/healthz ...
I1124 13:48:19.436844 607669 api_server.go:279] https://192.168.94.2:8443/healthz returned 200:
ok
I1124 13:48:19.438582 607669 api_server.go:141] control plane version: v1.28.0
I1124 13:48:19.438618 607669 api_server.go:131] duration metric: took 8.311152ms to wait for apiserver health ...
I1124 13:48:19.438632 607669 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 13:48:19.443134 607669 system_pods.go:59] 8 kube-system pods found
I1124 13:48:19.443191 607669 system_pods.go:61] "coredns-5dd5756b68-b5rrl" [4e6c9b7c-5f0a-4c60-8197-20e985a07403] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:19.443200 607669 system_pods.go:61] "etcd-old-k8s-version-513442" [0b1a1913-a17b-4362-af66-49436a831759] Running
I1124 13:48:19.443207 607669 system_pods.go:61] "kindnet-tpjvb" [c7df115a-8394-4f80-ac6c-5b1fc95337b5] Running
I1124 13:48:19.443213 607669 system_pods.go:61] "kube-apiserver-old-k8s-version-513442" [722a96a1-58fb-4240-9c3b-4732b2fc0877] Running
I1124 13:48:19.443219 607669 system_pods.go:61] "kube-controller-manager-old-k8s-version-513442" [df7953a7-c9cf-4854-b6bb-c43b0415e709] Running
I1124 13:48:19.443225 607669 system_pods.go:61] "kube-proxy-hzfcx" [f4ba208a-1a78-46ae-9684-ff3309400852] Running
I1124 13:48:19.443231 607669 system_pods.go:61] "kube-scheduler-old-k8s-version-513442" [c400bc97-a209-437d-ba96-60c58a4b8878] Running
I1124 13:48:19.443240 607669 system_pods.go:61] "storage-provisioner" [65efb270-100a-4e7c-bee8-24de1df28586] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:19.443248 607669 system_pods.go:74] duration metric: took 4.608559ms to wait for pod list to return data ...
I1124 13:48:19.443260 607669 default_sa.go:34] waiting for default service account to be created ...
I1124 13:48:19.446125 607669 default_sa.go:45] found service account: "default"
I1124 13:48:19.446157 607669 default_sa.go:55] duration metric: took 2.890045ms for default service account to be created ...
I1124 13:48:19.446170 607669 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 13:48:19.450324 607669 system_pods.go:86] 8 kube-system pods found
I1124 13:48:19.450375 607669 system_pods.go:89] "coredns-5dd5756b68-b5rrl" [4e6c9b7c-5f0a-4c60-8197-20e985a07403] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:19.450385 607669 system_pods.go:89] "etcd-old-k8s-version-513442" [0b1a1913-a17b-4362-af66-49436a831759] Running
I1124 13:48:19.450394 607669 system_pods.go:89] "kindnet-tpjvb" [c7df115a-8394-4f80-ac6c-5b1fc95337b5] Running
I1124 13:48:19.450408 607669 system_pods.go:89] "kube-apiserver-old-k8s-version-513442" [722a96a1-58fb-4240-9c3b-4732b2fc0877] Running
I1124 13:48:19.450415 607669 system_pods.go:89] "kube-controller-manager-old-k8s-version-513442" [df7953a7-c9cf-4854-b6bb-c43b0415e709] Running
I1124 13:48:19.450425 607669 system_pods.go:89] "kube-proxy-hzfcx" [f4ba208a-1a78-46ae-9684-ff3309400852] Running
I1124 13:48:19.450434 607669 system_pods.go:89] "kube-scheduler-old-k8s-version-513442" [c400bc97-a209-437d-ba96-60c58a4b8878] Running
I1124 13:48:19.450449 607669 system_pods.go:89] "storage-provisioner" [65efb270-100a-4e7c-bee8-24de1df28586] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:19.450484 607669 retry.go:31] will retry after 306.547577ms: missing components: kube-dns
I1124 13:48:19.761785 607669 system_pods.go:86] 8 kube-system pods found
I1124 13:48:19.761821 607669 system_pods.go:89] "coredns-5dd5756b68-b5rrl" [4e6c9b7c-5f0a-4c60-8197-20e985a07403] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:19.761828 607669 system_pods.go:89] "etcd-old-k8s-version-513442" [0b1a1913-a17b-4362-af66-49436a831759] Running
I1124 13:48:19.761835 607669 system_pods.go:89] "kindnet-tpjvb" [c7df115a-8394-4f80-ac6c-5b1fc95337b5] Running
I1124 13:48:19.761839 607669 system_pods.go:89] "kube-apiserver-old-k8s-version-513442" [722a96a1-58fb-4240-9c3b-4732b2fc0877] Running
I1124 13:48:19.761843 607669 system_pods.go:89] "kube-controller-manager-old-k8s-version-513442" [df7953a7-c9cf-4854-b6bb-c43b0415e709] Running
I1124 13:48:19.761846 607669 system_pods.go:89] "kube-proxy-hzfcx" [f4ba208a-1a78-46ae-9684-ff3309400852] Running
I1124 13:48:19.761850 607669 system_pods.go:89] "kube-scheduler-old-k8s-version-513442" [c400bc97-a209-437d-ba96-60c58a4b8878] Running
I1124 13:48:19.761855 607669 system_pods.go:89] "storage-provisioner" [65efb270-100a-4e7c-bee8-24de1df28586] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:19.761871 607669 retry.go:31] will retry after 263.639636ms: missing components: kube-dns
I1124 13:48:20.030723 607669 system_pods.go:86] 8 kube-system pods found
I1124 13:48:20.030764 607669 system_pods.go:89] "coredns-5dd5756b68-b5rrl" [4e6c9b7c-5f0a-4c60-8197-20e985a07403] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:20.030773 607669 system_pods.go:89] "etcd-old-k8s-version-513442" [0b1a1913-a17b-4362-af66-49436a831759] Running
I1124 13:48:20.030781 607669 system_pods.go:89] "kindnet-tpjvb" [c7df115a-8394-4f80-ac6c-5b1fc95337b5] Running
I1124 13:48:20.030787 607669 system_pods.go:89] "kube-apiserver-old-k8s-version-513442" [722a96a1-58fb-4240-9c3b-4732b2fc0877] Running
I1124 13:48:20.030794 607669 system_pods.go:89] "kube-controller-manager-old-k8s-version-513442" [df7953a7-c9cf-4854-b6bb-c43b0415e709] Running
I1124 13:48:20.030799 607669 system_pods.go:89] "kube-proxy-hzfcx" [f4ba208a-1a78-46ae-9684-ff3309400852] Running
I1124 13:48:20.030804 607669 system_pods.go:89] "kube-scheduler-old-k8s-version-513442" [c400bc97-a209-437d-ba96-60c58a4b8878] Running
I1124 13:48:20.030812 607669 system_pods.go:89] "storage-provisioner" [65efb270-100a-4e7c-bee8-24de1df28586] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:20.030836 607669 retry.go:31] will retry after 485.23875ms: missing components: kube-dns
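The fractional retry delays above (306.547577ms, 263.639636ms, 485.23875ms) suggest randomized backoff between system_pods checks. A generic sketch of that retry shape, not minikube's actual retry package:

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// retryWithJitter retries fn with randomized, roughly exponential backoff,
// the pattern behind the "will retry after ..." lines above (assumption).
func retryWithJitter(fn func() error, attempts int, base time.Duration) error {
	delay := base
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		// Randomize each step so concurrent pollers do not synchronize.
		jittered := time.Duration(float64(delay) * (0.5 + rand.Float64()))
		fmt.Printf("will retry after %v: %v\n", jittered, err)
		time.Sleep(jittered)
		delay *= 2
	}
	return err
}

func main() {
	calls := 0
	err := retryWithJitter(func() error {
		calls++
		if calls < 3 {
			return errors.New("missing components: kube-dns")
		}
		return nil
	}, 5, 200*time.Millisecond)
	fmt.Println("result:", err)
}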
I1124 13:48:16.203971 572647 cri.go:89] found id: ""
I1124 13:48:16.204004 572647 logs.go:282] 0 containers: []
W1124 13:48:16.204016 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:16.204025 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:16.204087 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:16.233087 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:16.233113 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:16.233119 572647 cri.go:89] found id: ""
I1124 13:48:16.233130 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:16.233184 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.237937 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:16.242366 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:16.242450 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:16.273007 572647 cri.go:89] found id: ""
I1124 13:48:16.273034 572647 logs.go:282] 0 containers: []
W1124 13:48:16.273043 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:16.273049 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:16.273100 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:16.302483 572647 cri.go:89] found id: ""
I1124 13:48:16.302518 572647 logs.go:282] 0 containers: []
W1124 13:48:16.302537 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:16.302553 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:16.302575 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:16.360777 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:16.360817 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:16.391672 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:16.391700 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:16.490704 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:16.490743 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:16.530411 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:16.530448 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:16.567070 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:16.567107 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:16.601689 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:16.601728 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:16.646105 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:16.646143 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:16.682522 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:16.682560 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:16.699850 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:16.699887 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:16.759811 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:16.759835 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:16.759853 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:16.795013 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:16.795048 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
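Each api_server.go:253/269 pair in these bursts is a plain HTTPS GET against /healthz that is reported as "stopped" while the connection is refused (the same outage that makes "kubectl describe nodes" fail against localhost:8443). Something like the following reproduces the probe; skipping TLS verification is an assumption for the sketch, as the real client would trust the profile's CA:

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

// checkHealthz mirrors the api_server.go probe: GET /healthz on the
// apiserver and surface connection errors as "stopped".
func checkHealthz(url string) error {
	client := &http.Client{
		Timeout: 5 * time.Second,
		// Assumption: skip cert verification for this standalone sketch.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("stopped: %s: %w", url, err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("%s returned %d: %s\n", url, resp.StatusCode, body)
	return nil
}

func main() {
	if err := checkHealthz("https://192.168.76.2:8443/healthz"); err != nil {
		fmt.Println(err)
	}
}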
I1124 13:48:19.334057 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:19.334568 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:19.334661 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:19.334733 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:19.365714 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:19.365735 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:19.365739 572647 cri.go:89] found id: ""
I1124 13:48:19.365747 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:19.365800 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.370354 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.374856 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:19.374992 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:19.405492 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:19.405519 572647 cri.go:89] found id: ""
I1124 13:48:19.405529 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:19.405589 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.411364 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:19.411426 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:19.443360 572647 cri.go:89] found id: ""
I1124 13:48:19.443391 572647 logs.go:282] 0 containers: []
W1124 13:48:19.443404 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:19.443412 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:19.443477 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:19.475298 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:19.475324 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:19.475331 572647 cri.go:89] found id: ""
I1124 13:48:19.475341 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:19.475407 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.480369 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.484782 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:19.484863 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:19.514622 572647 cri.go:89] found id: ""
I1124 13:48:19.514666 572647 logs.go:282] 0 containers: []
W1124 13:48:19.514716 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:19.514726 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:19.514807 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:19.550847 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:19.550872 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:19.550877 572647 cri.go:89] found id: ""
I1124 13:48:19.550886 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:19.550963 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.556478 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:19.561320 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:19.561401 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:19.596190 572647 cri.go:89] found id: ""
I1124 13:48:19.596226 572647 logs.go:282] 0 containers: []
W1124 13:48:19.596238 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:19.596247 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:19.596309 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:19.627382 572647 cri.go:89] found id: ""
I1124 13:48:19.627413 572647 logs.go:282] 0 containers: []
W1124 13:48:19.627424 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:19.627436 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:19.627452 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:19.694796 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:19.694836 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:19.752858 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:19.752896 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:19.788182 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:19.788224 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:19.879216 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:19.879255 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:19.940757 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:19.940776 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:19.940790 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:19.979681 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:19.979726 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:20.020042 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:20.020085 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:20.064463 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:20.064499 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:20.098012 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:20.098044 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:20.132122 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:20.132157 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:20.148958 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:20.148997 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:20.521094 607669 system_pods.go:86] 8 kube-system pods found
I1124 13:48:20.521123 607669 system_pods.go:89] "coredns-5dd5756b68-b5rrl" [4e6c9b7c-5f0a-4c60-8197-20e985a07403] Running
I1124 13:48:20.521130 607669 system_pods.go:89] "etcd-old-k8s-version-513442" [0b1a1913-a17b-4362-af66-49436a831759] Running
I1124 13:48:20.521133 607669 system_pods.go:89] "kindnet-tpjvb" [c7df115a-8394-4f80-ac6c-5b1fc95337b5] Running
I1124 13:48:20.521137 607669 system_pods.go:89] "kube-apiserver-old-k8s-version-513442" [722a96a1-58fb-4240-9c3b-4732b2fc0877] Running
I1124 13:48:20.521141 607669 system_pods.go:89] "kube-controller-manager-old-k8s-version-513442" [df7953a7-c9cf-4854-b6bb-c43b0415e709] Running
I1124 13:48:20.521145 607669 system_pods.go:89] "kube-proxy-hzfcx" [f4ba208a-1a78-46ae-9684-ff3309400852] Running
I1124 13:48:20.521148 607669 system_pods.go:89] "kube-scheduler-old-k8s-version-513442" [c400bc97-a209-437d-ba96-60c58a4b8878] Running
I1124 13:48:20.521151 607669 system_pods.go:89] "storage-provisioner" [65efb270-100a-4e7c-bee8-24de1df28586] Running
I1124 13:48:20.521159 607669 system_pods.go:126] duration metric: took 1.074982882s to wait for k8s-apps to be running ...
I1124 13:48:20.521166  607669 system_svc.go:44] waiting for kubelet service to be running ...
I1124 13:48:20.521215 607669 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 13:48:20.535666 607669 system_svc.go:56] duration metric: took 14.486184ms WaitForService to wait for kubelet
I1124 13:48:20.535706 607669 kubeadm.go:587] duration metric: took 16.006375183s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 13:48:20.535732 607669 node_conditions.go:102] verifying NodePressure condition ...
I1124 13:48:20.538619 607669 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1124 13:48:20.538646 607669 node_conditions.go:123] node cpu capacity is 8
I1124 13:48:20.538662 607669 node_conditions.go:105] duration metric: took 2.9245ms to run NodePressure ...
I1124 13:48:20.538676 607669 start.go:242] waiting for startup goroutines ...
I1124 13:48:20.538683 607669 start.go:247] waiting for cluster config update ...
I1124 13:48:20.538693 607669 start.go:256] writing updated cluster config ...
I1124 13:48:20.539040 607669 ssh_runner.go:195] Run: rm -f paused
I1124 13:48:20.543325 607669 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 13:48:20.547793 607669 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-b5rrl" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.552447 607669 pod_ready.go:94] pod "coredns-5dd5756b68-b5rrl" is "Ready"
I1124 13:48:20.552472 607669 pod_ready.go:86] duration metric: took 4.651627ms for pod "coredns-5dd5756b68-b5rrl" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.556328 607669 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.561689 607669 pod_ready.go:94] pod "etcd-old-k8s-version-513442" is "Ready"
I1124 13:48:20.561717 607669 pod_ready.go:86] duration metric: took 5.363766ms for pod "etcd-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.564634 607669 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.569265 607669 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-513442" is "Ready"
I1124 13:48:20.569291 607669 pod_ready.go:86] duration metric: took 4.631558ms for pod "kube-apiserver-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.572304 607669 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:20.948397 607669 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-513442" is "Ready"
I1124 13:48:20.948423 607669 pod_ready.go:86] duration metric: took 376.095956ms for pod "kube-controller-manager-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:21.148648 607669 pod_ready.go:83] waiting for pod "kube-proxy-hzfcx" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:21.548255 607669 pod_ready.go:94] pod "kube-proxy-hzfcx" is "Ready"
I1124 13:48:21.548288 607669 pod_ready.go:86] duration metric: took 399.608636ms for pod "kube-proxy-hzfcx" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:21.748744 607669 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:22.147789 607669 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-513442" is "Ready"
I1124 13:48:22.147821 607669 pod_ready.go:86] duration metric: took 399.0528ms for pod "kube-scheduler-old-k8s-version-513442" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:22.147833 607669 pod_ready.go:40] duration metric: took 1.604464617s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 13:48:22.193883 607669 start.go:625] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1124 13:48:22.196207 607669 out.go:203]
W1124 13:48:22.197964 607669 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1124 13:48:22.199516 607669 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1124 13:48:22.201541 607669 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-513442" cluster and "default" namespace by default
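The pod_ready.go waits that close out the old-k8s-version startup iterate over one label selector per control-plane component and require the PodReady condition to be True for every match. A compact client-go sketch of that check (kubeconfig path illustrative):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// podsReady reports whether every kube-system pod matching selector has
// the PodReady condition True, echoing the pod_ready.go waits above.
func podsReady(cs *kubernetes.Clientset, selector string) (bool, error) {
	pods, err := cs.CoreV1().Pods("kube-system").List(context.TODO(),
		metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return false, err
	}
	for _, pod := range pods.Items {
		ready := false
		for _, cond := range pod.Status.Conditions {
			if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
				ready = true
			}
		}
		if !ready {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/.kube/config") // illustrative path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	for _, sel := range []string{"k8s-app=kube-dns", "component=etcd",
		"component=kube-apiserver", "component=kube-controller-manager",
		"k8s-app=kube-proxy", "component=kube-scheduler"} {
		ok, err := podsReady(cs, sel)
		fmt.Printf("%-35s ready=%v err=%v\n", sel, ok, err)
	}
}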
W1124 13:48:20.947014 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
W1124 13:48:22.948554 608917 node_ready.go:57] node "no-preload-608395" has "Ready":"False" status (will retry)
I1124 13:48:24.446130 608917 node_ready.go:49] node "no-preload-608395" is "Ready"
I1124 13:48:24.446168 608917 node_ready.go:38] duration metric: took 14.503611427s for node "no-preload-608395" to be "Ready" ...
I1124 13:48:24.446195 608917 api_server.go:52] waiting for apiserver process to appear ...
I1124 13:48:24.446254 608917 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1124 13:48:24.460952 608917 api_server.go:72] duration metric: took 14.82264088s to wait for apiserver process to appear ...
I1124 13:48:24.460990 608917 api_server.go:88] waiting for apiserver healthz status ...
I1124 13:48:24.461021 608917 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1124 13:48:24.466768 608917 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1124 13:48:24.468117 608917 api_server.go:141] control plane version: v1.34.1
I1124 13:48:24.468151 608917 api_server.go:131] duration metric: took 7.151862ms to wait for apiserver health ...
I1124 13:48:24.468164 608917 system_pods.go:43] waiting for kube-system pods to appear ...
I1124 13:48:24.473836 608917 system_pods.go:59] 8 kube-system pods found
I1124 13:48:24.473891 608917 system_pods.go:61] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:24.473901 608917 system_pods.go:61] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:24.473965 608917 system_pods.go:61] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:24.473980 608917 system_pods.go:61] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:24.473987 608917 system_pods.go:61] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:24.473995 608917 system_pods.go:61] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:24.474001 608917 system_pods.go:61] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:24.474011 608917 system_pods.go:61] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:24.474025 608917 system_pods.go:74] duration metric: took 5.853076ms to wait for pod list to return data ...
I1124 13:48:24.474037 608917 default_sa.go:34] waiting for default service account to be created ...
I1124 13:48:24.476681 608917 default_sa.go:45] found service account: "default"
I1124 13:48:24.476712 608917 default_sa.go:55] duration metric: took 2.661232ms for default service account to be created ...
I1124 13:48:24.476724 608917 system_pods.go:116] waiting for k8s-apps to be running ...
I1124 13:48:24.479715 608917 system_pods.go:86] 8 kube-system pods found
I1124 13:48:24.479757 608917 system_pods.go:89] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:24.479765 608917 system_pods.go:89] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:24.479776 608917 system_pods.go:89] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:24.479782 608917 system_pods.go:89] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:24.479788 608917 system_pods.go:89] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:24.479793 608917 system_pods.go:89] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:24.479798 608917 system_pods.go:89] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:24.479806 608917 system_pods.go:89] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:24.479831 608917 retry.go:31] will retry after 257.034103ms: missing components: kube-dns
I1124 13:48:24.740811 608917 system_pods.go:86] 8 kube-system pods found
I1124 13:48:24.740842 608917 system_pods.go:89] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:24.740848 608917 system_pods.go:89] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:24.740854 608917 system_pods.go:89] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:24.740858 608917 system_pods.go:89] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:24.740863 608917 system_pods.go:89] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:24.740866 608917 system_pods.go:89] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:24.740869 608917 system_pods.go:89] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:24.740876 608917 system_pods.go:89] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:24.740892 608917 retry.go:31] will retry after 244.335921ms: missing components: kube-dns
I1124 13:48:24.989021 608917 system_pods.go:86] 8 kube-system pods found
I1124 13:48:24.989054 608917 system_pods.go:89] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:24.989061 608917 system_pods.go:89] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:24.989067 608917 system_pods.go:89] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:24.989072 608917 system_pods.go:89] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:24.989077 608917 system_pods.go:89] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:24.989080 608917 system_pods.go:89] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:24.989084 608917 system_pods.go:89] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:24.989089 608917 system_pods.go:89] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:24.989104 608917 retry.go:31] will retry after 431.238044ms: missing components: kube-dns
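The retry.go lines above show jittered backoff between system-pods checks (257ms, 244ms, 431ms). A generic sketch of that retry shape; base delay and jitter factor are assumptions, and this is not minikube's retry package:

```go
// retry_sketch.go: a generic jittered-backoff retry in the shape of the
// retry.go lines above (delays of 257ms, 244ms, 431ms, ...).
package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

func retry(attempts int, base time.Duration, f func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = f(); err == nil {
			return nil
		}
		// exponential backoff with +/-50% jitter, matching the uneven delays
		d := base << i
		d = d/2 + time.Duration(rand.Int63n(int64(d)))
		fmt.Printf("will retry after %v: %v\n", d, err)
		time.Sleep(d)
	}
	return err
}

func main() {
	calls := 0
	err := retry(5, 250*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("missing components: kube-dns")
		}
		return nil
	})
	fmt.Println("done:", err)
}
```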
I1124 13:48:22.686011 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:22.686450 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:22.686506 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:22.686563 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:22.718842 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:22.718868 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:22.718874 572647 cri.go:89] found id: ""
I1124 13:48:22.718885 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
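The cri.go/logs.go pairs above reduce to running `crictl ps -a --quiet --name=<pattern>` and splitting its stdout into container IDs. A sketch, assuming crictl on PATH and password-less sudo (minikube itself runs this over SSH, per the ssh_runner lines):

```go
// crilist_sketch.go: list container IDs the way the cri.go lines above do.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func listContainers(name string) ([]string, error) {
	out, err := exec.Command("sudo", "crictl", "ps", "-a", "--quiet", "--name="+name).Output()
	if err != nil {
		return nil, fmt.Errorf("crictl ps: %w", err)
	}
	var ids []string
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		if line != "" {
			ids = append(ids, line)
		}
	}
	return ids, nil
}

func main() {
	ids, err := listContainers("kube-apiserver")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d containers: %v\n", len(ids), ids)
}
```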
I1124 13:48:22.719025 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.724051 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.728627 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:22.728697 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:22.758279 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:22.758305 572647 cri.go:89] found id: ""
I1124 13:48:22.758315 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:22.758378 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.762905 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:22.763025 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:22.796176 572647 cri.go:89] found id: ""
I1124 13:48:22.796207 572647 logs.go:282] 0 containers: []
W1124 13:48:22.796218 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:22.796227 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:22.796293 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:22.828770 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:22.828801 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:22.828815 572647 cri.go:89] found id: ""
I1124 13:48:22.828827 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:22.828886 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.833530 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.837668 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:22.837750 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:22.867760 572647 cri.go:89] found id: ""
I1124 13:48:22.867793 572647 logs.go:282] 0 containers: []
W1124 13:48:22.867806 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:22.867815 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:22.867976 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:22.899275 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:22.899305 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:22.899312 572647 cri.go:89] found id: ""
I1124 13:48:22.899327 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:22.899391 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.903859 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:22.908121 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:22.908190 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:22.938883 572647 cri.go:89] found id: ""
I1124 13:48:22.938961 572647 logs.go:282] 0 containers: []
W1124 13:48:22.938972 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:22.938980 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:22.939033 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:22.969840 572647 cri.go:89] found id: ""
I1124 13:48:22.969864 572647 logs.go:282] 0 containers: []
W1124 13:48:22.969872 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:22.969882 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:22.969903 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:23.031386 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:23.031411 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:23.031425 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:23.067770 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:23.067805 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:23.104851 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:23.104886 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:23.160621 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:23.160668 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:23.190994 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:23.191026 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:23.226509 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:23.226542 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:23.269082 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:23.269130 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:23.360572 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:23.360613 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:23.399049 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:23.399089 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:23.440241 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:23.440282 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:23.474172 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:23.474212 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
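The "Gathering logs for kubelet/containerd" steps fetch the last 400 lines of each systemd unit's journal. A sketch of that step, assuming journalctl and sudo are available on the target host:

```go
// journal_sketch.go: fetch the last N lines of a systemd unit's journal,
// as the log-gathering steps above do.
package main

import (
	"fmt"
	"os/exec"
)

func unitLogs(unit string, lines int) (string, error) {
	out, err := exec.Command("sudo", "journalctl", "-u", unit, "-n", fmt.Sprint(lines)).CombinedOutput()
	return string(out), err
}

func main() {
	logs, err := unitLogs("kubelet", 400) // 400 matches the log above
	if err != nil {
		fmt.Println("journalctl failed:", err)
		return
	}
	fmt.Print(logs)
}
```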
I1124 13:48:25.992569 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:25.993167 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:25.993241 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:25.993310 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:26.021789 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:26.021816 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:26.021823 572647 cri.go:89] found id: ""
I1124 13:48:26.021834 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:26.021985 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.027084 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.031267 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:26.031350 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:26.063349 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:26.063379 572647 cri.go:89] found id: ""
I1124 13:48:26.063390 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:26.063448 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.068064 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:26.068140 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:26.096106 572647 cri.go:89] found id: ""
I1124 13:48:26.096148 572647 logs.go:282] 0 containers: []
W1124 13:48:26.096158 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:26.096165 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:26.096220 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:26.126156 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:26.126186 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:26.126193 572647 cri.go:89] found id: ""
I1124 13:48:26.126205 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:26.126275 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.131369 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.135595 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:26.135657 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:26.163133 572647 cri.go:89] found id: ""
I1124 13:48:26.163161 572647 logs.go:282] 0 containers: []
W1124 13:48:26.163169 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:26.163187 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:26.163244 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:26.192355 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:26.192378 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:26.192384 572647 cri.go:89] found id: ""
I1124 13:48:26.192394 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:26.192549 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:26.197316 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:25.424597 608917 system_pods.go:86] 8 kube-system pods found
I1124 13:48:25.424631 608917 system_pods.go:89] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1124 13:48:25.424636 608917 system_pods.go:89] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:25.424642 608917 system_pods.go:89] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:25.424646 608917 system_pods.go:89] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:25.424650 608917 system_pods.go:89] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:25.424653 608917 system_pods.go:89] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:25.424656 608917 system_pods.go:89] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:25.424663 608917 system_pods.go:89] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1124 13:48:25.424679 608917 retry.go:31] will retry after 458.014987ms: missing components: kube-dns
I1124 13:48:25.886603 608917 system_pods.go:86] 8 kube-system pods found
I1124 13:48:25.886633 608917 system_pods.go:89] "coredns-66bc5c9577-rcf8v" [a909252f-b923-46e8-acff-b0d0943c4a29] Running
I1124 13:48:25.886641 608917 system_pods.go:89] "etcd-no-preload-608395" [b9426983-537c-4c4f-a8dd-3378b24f66f3] Running
I1124 13:48:25.886644 608917 system_pods.go:89] "kindnet-zqlgn" [dc580d4e-c35b-4def-94d4-43697fee08ef] Running
I1124 13:48:25.886649 608917 system_pods.go:89] "kube-apiserver-no-preload-608395" [00ece03a-94a4-4b04-8ee2-a6f539022a06] Running
I1124 13:48:25.886653 608917 system_pods.go:89] "kube-controller-manager-no-preload-608395" [f4744606-354b-472e-a224-38df2dd201ca] Running
I1124 13:48:25.886657 608917 system_pods.go:89] "kube-proxy-5vj5p" [2e67d44e-9eb4-4bb7-a087-a76def391cbb] Running
I1124 13:48:25.886660 608917 system_pods.go:89] "kube-scheduler-no-preload-608395" [5bf4e205-28fb-4838-99bb-4fc91fe8642b] Running
I1124 13:48:25.886663 608917 system_pods.go:89] "storage-provisioner" [c3c5ce52-cc27-4ccb-8bfb-e8f60c0c8faa] Running
I1124 13:48:25.886671 608917 system_pods.go:126] duration metric: took 1.409940532s to wait for k8s-apps to be running ...
I1124 13:48:25.886680 608917 system_svc.go:44] waiting for kubelet service to be running ....
I1124 13:48:25.886726 608917 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1124 13:48:25.901294 608917 system_svc.go:56] duration metric: took 14.604723ms WaitForService to wait for kubelet
I1124 13:48:25.901324 608917 kubeadm.go:587] duration metric: took 16.26302303s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1124 13:48:25.901343 608917 node_conditions.go:102] verifying NodePressure condition ...
I1124 13:48:25.904190 608917 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1124 13:48:25.904219 608917 node_conditions.go:123] node cpu capacity is 8
I1124 13:48:25.904234 608917 node_conditions.go:105] duration metric: took 2.88688ms to run NodePressure ...
I1124 13:48:25.904249 608917 start.go:242] waiting for startup goroutines ...
I1124 13:48:25.904256 608917 start.go:247] waiting for cluster config update ...
I1124 13:48:25.904266 608917 start.go:256] writing updated cluster config ...
I1124 13:48:25.904560 608917 ssh_runner.go:195] Run: rm -f paused
I1124 13:48:25.909215 608917 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 13:48:25.912986 608917 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-rcf8v" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.917301 608917 pod_ready.go:94] pod "coredns-66bc5c9577-rcf8v" is "Ready"
I1124 13:48:25.917324 608917 pod_ready.go:86] duration metric: took 4.297309ms for pod "coredns-66bc5c9577-rcf8v" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.919442 608917 pod_ready.go:83] waiting for pod "etcd-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.923976 608917 pod_ready.go:94] pod "etcd-no-preload-608395" is "Ready"
I1124 13:48:25.923999 608917 pod_ready.go:86] duration metric: took 4.535115ms for pod "etcd-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.926003 608917 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.930385 608917 pod_ready.go:94] pod "kube-apiserver-no-preload-608395" is "Ready"
I1124 13:48:25.930413 608917 pod_ready.go:86] duration metric: took 4.382406ms for pod "kube-apiserver-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:25.932261 608917 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:26.313581 608917 pod_ready.go:94] pod "kube-controller-manager-no-preload-608395" is "Ready"
I1124 13:48:26.313615 608917 pod_ready.go:86] duration metric: took 381.333887ms for pod "kube-controller-manager-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:26.514064 608917 pod_ready.go:83] waiting for pod "kube-proxy-5vj5p" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:26.913664 608917 pod_ready.go:94] pod "kube-proxy-5vj5p" is "Ready"
I1124 13:48:26.913702 608917 pod_ready.go:86] duration metric: took 399.60223ms for pod "kube-proxy-5vj5p" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:27.114488 608917 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:27.514056 608917 pod_ready.go:94] pod "kube-scheduler-no-preload-608395" is "Ready"
I1124 13:48:27.514084 608917 pod_ready.go:86] duration metric: took 399.56934ms for pod "kube-scheduler-no-preload-608395" in "kube-system" namespace to be "Ready" or be gone ...
I1124 13:48:27.514098 608917 pod_ready.go:40] duration metric: took 1.604847792s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1124 13:48:27.561310 608917 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1124 13:48:27.563544 608917 out.go:179] * Done! kubectl is now configured to use "no-preload-608395" cluster and "default" namespace by default
I1124 13:48:26.202352 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:26.202439 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:26.231899 572647 cri.go:89] found id: ""
I1124 13:48:26.231953 572647 logs.go:282] 0 containers: []
W1124 13:48:26.231964 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:26.231973 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:26.232040 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:26.263417 572647 cri.go:89] found id: ""
I1124 13:48:26.263446 572647 logs.go:282] 0 containers: []
W1124 13:48:26.263459 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:26.263473 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:26.263488 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:26.354230 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:26.354265 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:26.389608 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:26.389652 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:26.427040 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:26.427077 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:26.466568 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:26.466603 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:26.503710 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:26.503749 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:26.539150 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:26.539193 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:26.583782 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:26.583825 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:26.617656 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:26.617696 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:26.634777 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:26.634809 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:26.693534 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:26.693559 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:26.693577 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:26.748627 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:26.748668 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:29.280171 572647 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1124 13:48:29.280640 572647 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1124 13:48:29.280694 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1124 13:48:29.280748 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1124 13:48:29.309613 572647 cri.go:89] found id: "6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:29.309638 572647 cri.go:89] found id: "707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:29.309644 572647 cri.go:89] found id: ""
I1124 13:48:29.309660 572647 logs.go:282] 2 containers: [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce]
I1124 13:48:29.309730 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.314623 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.319864 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1124 13:48:29.319962 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1124 13:48:29.348671 572647 cri.go:89] found id: "856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:29.348699 572647 cri.go:89] found id: ""
I1124 13:48:29.348709 572647 logs.go:282] 1 containers: [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72]
I1124 13:48:29.348775 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.353662 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1124 13:48:29.353728 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1124 13:48:29.383017 572647 cri.go:89] found id: ""
I1124 13:48:29.383046 572647 logs.go:282] 0 containers: []
W1124 13:48:29.383058 572647 logs.go:284] No container was found matching "coredns"
I1124 13:48:29.383066 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1124 13:48:29.383121 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1124 13:48:29.411238 572647 cri.go:89] found id: "8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:29.411259 572647 cri.go:89] found id: "9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:29.411264 572647 cri.go:89] found id: ""
I1124 13:48:29.411271 572647 logs.go:282] 2 containers: [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2]
I1124 13:48:29.411325 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.415976 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.420189 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1124 13:48:29.420264 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1124 13:48:29.449856 572647 cri.go:89] found id: ""
I1124 13:48:29.449890 572647 logs.go:282] 0 containers: []
W1124 13:48:29.449921 572647 logs.go:284] No container was found matching "kube-proxy"
I1124 13:48:29.449929 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1124 13:48:29.450001 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1124 13:48:29.480136 572647 cri.go:89] found id: "a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:29.480164 572647 cri.go:89] found id: "89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
I1124 13:48:29.480171 572647 cri.go:89] found id: ""
I1124 13:48:29.480181 572647 logs.go:282] 2 containers: [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b]
I1124 13:48:29.480258 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.484998 572647 ssh_runner.go:195] Run: which crictl
I1124 13:48:29.489433 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1124 13:48:29.489504 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1124 13:48:29.519804 572647 cri.go:89] found id: ""
I1124 13:48:29.519841 572647 logs.go:282] 0 containers: []
W1124 13:48:29.519854 572647 logs.go:284] No container was found matching "kindnet"
I1124 13:48:29.519864 572647 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1124 13:48:29.520048 572647 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1124 13:48:29.549935 572647 cri.go:89] found id: ""
I1124 13:48:29.549964 572647 logs.go:282] 0 containers: []
W1124 13:48:29.549974 572647 logs.go:284] No container was found matching "storage-provisioner"
I1124 13:48:29.549986 572647 logs.go:123] Gathering logs for containerd ...
I1124 13:48:29.549997 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1124 13:48:29.593521 572647 logs.go:123] Gathering logs for kubelet ...
I1124 13:48:29.593560 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1124 13:48:29.681751 572647 logs.go:123] Gathering logs for dmesg ...
I1124 13:48:29.681792 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1124 13:48:29.699198 572647 logs.go:123] Gathering logs for describe nodes ...
I1124 13:48:29.699232 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1124 13:48:29.759823 572647 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1124 13:48:29.759850 572647 logs.go:123] Gathering logs for kube-apiserver [6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3] ...
I1124 13:48:29.759863 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 6700c126fd327c2e159d0faade33f59514f89b0a53de7e75c697f3b9b2c2f3b3"
I1124 13:48:29.798497 572647 logs.go:123] Gathering logs for kube-scheduler [9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2] ...
I1124 13:48:29.798534 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 9339d42ee555f7da1cb5ae94cf3bc22b2f3744f2ca5e2dfd459c4212a28774e2"
I1124 13:48:29.835677 572647 logs.go:123] Gathering logs for kube-controller-manager [a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604] ...
I1124 13:48:29.835718 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a8454ffcf0213ebcba100cbad1da47ec4105f1be4ce6ed2911d3997ae6994604"
I1124 13:48:29.864876 572647 logs.go:123] Gathering logs for container status ...
I1124 13:48:29.864923 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1124 13:48:29.898153 572647 logs.go:123] Gathering logs for kube-apiserver [707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce] ...
I1124 13:48:29.898186 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 707b1dc8c22b4ecacc52d048e892a6c42437f5e5e64949cdb7dc0b6ffad3a6ce"
I1124 13:48:29.932035 572647 logs.go:123] Gathering logs for etcd [856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72] ...
I1124 13:48:29.932073 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 856aed50c704fa89134428f4365f3461d5d97f7b0f6e82094b1cba4928ec0c72"
I1124 13:48:29.971224 572647 logs.go:123] Gathering logs for kube-scheduler [8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd] ...
I1124 13:48:29.971258 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 8249c9dabc6b89efb0dd079b97d069b96667a655efa778d1d719e24d6ec100fd"
I1124 13:48:30.026576 572647 logs.go:123] Gathering logs for kube-controller-manager [89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b] ...
I1124 13:48:30.026619 572647 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89dffe66574edd9221074d8edcc51ee3d2cf2497cf9a3bd0e007560447aaa97b"
==> container status <==
CONTAINER       IMAGE           CREATED          STATE    NAME                      ATTEMPT  POD ID          POD                                              NAMESPACE
b44a9a38266a3   56cc512116c8f   10 seconds ago   Running  busybox                   0        91e7e42c593d0   busybox                                          default
8d4a4dd9d6632   ead0a4a53df89   16 seconds ago   Running  coredns                   0        1c930bc4d6523   coredns-5dd5756b68-b5rrl                         kube-system
c9c8f51adb6bb   6e38f40d628db   16 seconds ago   Running  storage-provisioner       0        840fae773d68e   storage-provisioner                              kube-system
1dab1df16e654   409467f978b4a   27 seconds ago   Running  kindnet-cni               0        30a65fd13bcca   kindnet-tpjvb                                    kube-system
0b87cfcc163e3   ea1030da44aa1   30 seconds ago   Running  kube-proxy                0        555af9e11f935   kube-proxy-hzfcx                                 kube-system
b89c098ff2cb6   bb5e0dde9054c   48 seconds ago   Running  kube-apiserver            0        b832e9f75c0f1   kube-apiserver-old-k8s-version-513442            kube-system
f7663d3953f0e   4be79c38a4bab   48 seconds ago   Running  kube-controller-manager   0        06bb689695cce   kube-controller-manager-old-k8s-version-513442   kube-system
bdd5c20173350   f6f496300a2ae   48 seconds ago   Running  kube-scheduler            0        ac1efcdb81d0e   kube-scheduler-old-k8s-version-513442            kube-system
5793c7fd11b5c   73deb9a3f7025   49 seconds ago   Running  etcd                      0        3c4129b98c0d7   etcd-old-k8s-version-513442                      kube-system
==> containerd <==
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.636050137Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-5dd5756b68-b5rrl,Uid:4e6c9b7c-5f0a-4c60-8197-20e985a07403,Namespace:kube-system,Attempt:0,} returns sandbox id \"1c930bc4d6523dcc2ff99c9243131fcf23dfc7881b09c013bf55e68b23ecf25e\""
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.639799945Z" level=info msg="CreateContainer within sandbox \"1c930bc4d6523dcc2ff99c9243131fcf23dfc7881b09c013bf55e68b23ecf25e\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.648881001Z" level=info msg="Container 8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89: CDI devices from CRI Config.CDIDevices: []"
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.657829357Z" level=info msg="CreateContainer within sandbox \"1c930bc4d6523dcc2ff99c9243131fcf23dfc7881b09c013bf55e68b23ecf25e\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89\""
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.658662420Z" level=info msg="StartContainer for \"8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89\""
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.659800869Z" level=info msg="connecting to shim 8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89" address="unix:///run/containerd/s/c69a9b00491bdefff20b5fba21aa1d556fb9c3a3bad974c8b8be870ca95e072b" protocol=ttrpc version=3
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.704634320Z" level=info msg="StartContainer for \"c9c8f51adb6bbca8e0f954ad9082c0c66235dce129e152dd682ab69622b44aac\" returns successfully"
Nov 24 13:48:19 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:19.716701551Z" level=info msg="StartContainer for \"8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89\" returns successfully"
Nov 24 13:48:22 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:22.659740340Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:e21ee73b-578f-48c9-826d-ab3b4bbb7871,Namespace:default,Attempt:0,}"
Nov 24 13:48:22 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:22.705643585Z" level=info msg="connecting to shim 91e7e42c593d0f49381ba051fa95a3bffc3c2fedf4ee572f1ee3e65a03cebfff" address="unix:///run/containerd/s/a6973921fa6bbb987fab0736637648be3dc3e077c5046184370bd0c127ef00c4" namespace=k8s.io protocol=ttrpc version=3
Nov 24 13:48:22 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:22.781316455Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:e21ee73b-578f-48c9-826d-ab3b4bbb7871,Namespace:default,Attempt:0,} returns sandbox id \"91e7e42c593d0f49381ba051fa95a3bffc3c2fedf4ee572f1ee3e65a03cebfff\""
Nov 24 13:48:22 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:22.783364521Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.550927147Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.551949670Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396647"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.553332639Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.555518804Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.555999909Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.772594905s"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.556037581Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.557958127Z" level=info msg="CreateContainer within sandbox \"91e7e42c593d0f49381ba051fa95a3bffc3c2fedf4ee572f1ee3e65a03cebfff\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.566156418Z" level=info msg="Container b44a9a38266a36367dda4e29d517101d0bad25018140ed3049b32babe692f605: CDI devices from CRI Config.CDIDevices: []"
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.572811164Z" level=info msg="CreateContainer within sandbox \"91e7e42c593d0f49381ba051fa95a3bffc3c2fedf4ee572f1ee3e65a03cebfff\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"b44a9a38266a36367dda4e29d517101d0bad25018140ed3049b32babe692f605\""
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.573543998Z" level=info msg="StartContainer for \"b44a9a38266a36367dda4e29d517101d0bad25018140ed3049b32babe692f605\""
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.574401159Z" level=info msg="connecting to shim b44a9a38266a36367dda4e29d517101d0bad25018140ed3049b32babe692f605" address="unix:///run/containerd/s/a6973921fa6bbb987fab0736637648be3dc3e077c5046184370bd0c127ef00c4" protocol=ttrpc version=3
Nov 24 13:48:25 old-k8s-version-513442 containerd[663]: time="2025-11-24T13:48:25.628848926Z" level=info msg="StartContainer for \"b44a9a38266a36367dda4e29d517101d0bad25018140ed3049b32babe692f605\" returns successfully"
Nov 24 13:48:32 old-k8s-version-513442 containerd[663]: E1124 13:48:32.433506 663 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
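For orientation, the containerd entries above trace the standard CRI sequence for the busybox pod: RunPodSandbox returns a sandbox id, PullImage fetches gcr.io/k8s-minikube/busybox:1.28.4-glibc (in 2.77s), CreateContainer registers the busybox container inside that sandbox, and StartContainer launches it; coredns follows the same sequence within its own sandbox.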
==> coredns [8d4a4dd9d6632a607a007a0e131e676696c4d059874b38cd47f762f53926ad89] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 4c7f44b73086be760ec9e64204f63c5cc5a952c8c1c55ba0b41d8fc3315ce3c7d0259d04847cb8b4561043d4549603f3bccfd9b397eeb814eef159d244d26f39
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:57003 - 26434 "HINFO IN 1735205229727733014.6660763770011463869. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.021751094s
==> describe nodes <==
Name: old-k8s-version-513442
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-513442
kubernetes.io/os=linux
minikube.k8s.io/commit=b5d1c9f4e75f4e638a533695fd62619949cefcab
minikube.k8s.io/name=old-k8s-version-513442
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_24T13_47_52_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 24 Nov 2025 13:47:48 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-513442
AcquireTime: <unset>
RenewTime: Mon, 24 Nov 2025 13:48:32 +0000
Conditions:
Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------  -----------------                 ------------------                ------                       -------
MemoryPressure   False   Mon, 24 Nov 2025 13:48:22 +0000   Mon, 24 Nov 2025 13:47:47 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False   Mon, 24 Nov 2025 13:48:22 +0000   Mon, 24 Nov 2025 13:47:47 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False   Mon, 24 Nov 2025 13:48:22 +0000   Mon, 24 Nov 2025 13:47:47 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True    Mon, 24 Nov 2025 13:48:22 +0000   Mon, 24 Nov 2025 13:48:19 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.94.2
Hostname: old-k8s-version-513442
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863356Ki
pods: 110
System Info:
Machine ID: 9629f1d5bc1ed524a56ce23c69214c09
System UUID: 7bc159f8-7fe0-4f8d-82dc-0cc733a1645b
Boot ID: 715d4626-373f-499b-b5de-b6d832ce4fe4
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace     Name                                             CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------     ----                                             ------------  ----------  ---------------  -------------  ---
default       busybox                                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         13s
kube-system   coredns-5dd5756b68-b5rrl                         100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     31s
kube-system   etcd-old-k8s-version-513442                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         45s
kube-system   kindnet-tpjvb                                    100m (1%)     100m (1%)   50Mi (0%)        50Mi (0%)      31s
kube-system   kube-apiserver-old-k8s-version-513442            250m (3%)     0 (0%)      0 (0%)           0 (0%)         43s
kube-system   kube-controller-manager-old-k8s-version-513442   200m (2%)     0 (0%)      0 (0%)           0 (0%)         43s
kube-system   kube-proxy-hzfcx                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         31s
kube-system   kube-scheduler-old-k8s-version-513442            100m (1%)     0 (0%)      0 (0%)           0 (0%)         43s
kube-system   storage-provisioner                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         30s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests     Limits
--------           --------     ------
cpu                850m (10%)   100m (1%)
memory             220Mi (0%)   220Mi (0%)
ephemeral-storage  0 (0%)       0 (0%)
hugepages-1Gi      0 (0%)       0 (0%)
hugepages-2Mi      0 (0%)       0 (0%)
Events:
Type    Reason                   Age                From             Message
----    ------                   ----               ----             -------
Normal  Starting                 30s                kube-proxy
Normal  Starting                 50s                kubelet          Starting kubelet.
Normal  NodeHasSufficientMemory  49s (x8 over 50s)  kubelet          Node old-k8s-version-513442 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure    49s (x8 over 50s)  kubelet          Node old-k8s-version-513442 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID     49s (x7 over 50s)  kubelet          Node old-k8s-version-513442 status is now: NodeHasSufficientPID
Normal  NodeAllocatableEnforced  49s                kubelet          Updated Node Allocatable limit across pods
Normal  Starting                 44s                kubelet          Starting kubelet.
Normal  NodeAllocatableEnforced  44s                kubelet          Updated Node Allocatable limit across pods
Normal  NodeHasSufficientMemory  43s                kubelet          Node old-k8s-version-513442 status is now: NodeHasSufficientMemory
Normal  NodeHasNoDiskPressure    43s                kubelet          Node old-k8s-version-513442 status is now: NodeHasNoDiskPressure
Normal  NodeHasSufficientPID     43s                kubelet          Node old-k8s-version-513442 status is now: NodeHasSufficientPID
Normal  RegisteredNode           32s                node-controller  Node old-k8s-version-513442 event: Registered Node old-k8s-version-513442 in Controller
Normal  NodeReady                16s                kubelet          Node old-k8s-version-513442 status is now: NodeReady
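A quick arithmetic cross-check of the Allocated resources block against the pod table above: CPU requests 100m + 100m + 100m + 250m + 200m + 100m = 850m; CPU limits 100m (kindnet only); memory requests 70Mi + 100Mi + 50Mi = 220Mi; memory limits 170Mi + 50Mi = 220Mi.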
==> dmesg <==
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 0a 91 30 bc 58 af 08 06
[Nov24 12:45] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 9a fb 84 7d 9e 9e 08 06
[ +0.000332] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 0a 91 30 bc 58 af 08 06
[ +25.292047] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff da 14 b4 9b 3e 8f 08 06
[ +0.024207] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 06 8e 71 0b 76 c3 08 06
[ +16.768103] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff de 45 b6 ad fe 93 08 06
[ +5.950770] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 2e b5 4a 70 0a 35 08 06
[Nov24 12:46] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 4e 8b d0 4a da 7e 08 06
[ +0.000557] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 2e b5 4a 70 0a 35 08 06
[ +1.903671] IPv4: martian source 10.244.0.1 from 10.244.0.4, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff c2 1f e8 fc 59 74 08 06
[ +0.000341] IPv4: martian source 10.244.0.4 from 10.244.0.3, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff da 14 b4 9b 3e 8f 08 06
[ +17.535584] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff e2 31 ec 7c 1d 38 08 06
[ +0.000426] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff de 45 b6 ad fe 93 08 06
==> etcd [5793c7fd11b5c568735219e3d193c67360dde88032a438ae332a3e12d7fdf0a5] <==
{"level":"info","ts":"2025-11-24T13:47:46.896061Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","added-peer-id":"dfc97eb0aae75b33","added-peer-peer-urls":["https://192.168.94.2:2380"]}
{"level":"info","ts":"2025-11-24T13:47:47.18298Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-24T13:47:47.183032Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-24T13:47:47.183064Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgPreVoteResp from dfc97eb0aae75b33 at term 1"}
{"level":"info","ts":"2025-11-24T13:47:47.183082Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became candidate at term 2"}
{"level":"info","ts":"2025-11-24T13:47:47.18309Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 received MsgVoteResp from dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-24T13:47:47.183102Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"dfc97eb0aae75b33 became leader at term 2"}
{"level":"info","ts":"2025-11-24T13:47:47.183112Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: dfc97eb0aae75b33 elected leader dfc97eb0aae75b33 at term 2"}
{"level":"info","ts":"2025-11-24T13:47:47.184166Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"dfc97eb0aae75b33","local-member-attributes":"{Name:old-k8s-version-513442 ClientURLs:[https://192.168.94.2:2379]}","request-path":"/0/members/dfc97eb0aae75b33/attributes","cluster-id":"da400bbece288f5a","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-24T13:47:47.184441Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T13:47:47.184423Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-24T13:47:47.184639Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T13:47:47.184677Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-24T13:47:47.184697Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-24T13:47:47.185356Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"da400bbece288f5a","local-member-id":"dfc97eb0aae75b33","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T13:47:47.185462Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T13:47:47.185485Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-24T13:47:47.186127Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.94.2:2379"}
{"level":"info","ts":"2025-11-24T13:47:47.186272Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-24T13:48:02.673385Z","caller":"traceutil/trace.go:171","msg":"trace[456960560] linearizableReadLoop","detail":"{readStateIndex:331; appliedIndex:330; }","duration":"136.421105ms","start":"2025-11-24T13:48:02.536946Z","end":"2025-11-24T13:48:02.673367Z","steps":["trace[456960560] 'read index received' (duration: 136.248358ms)","trace[456960560] 'applied index is now lower than readState.Index' (duration: 171.987µs)"],"step_count":2}
{"level":"warn","ts":"2025-11-24T13:48:02.673673Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"136.721804ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces/kube-system\" ","response":"range_response_count:1 size:351"}
{"level":"info","ts":"2025-11-24T13:48:02.67373Z","caller":"traceutil/trace.go:171","msg":"trace[286257082] range","detail":"{range_begin:/registry/namespaces/kube-system; range_end:; response_count:1; response_revision:319; }","duration":"136.809717ms","start":"2025-11-24T13:48:02.536907Z","end":"2025-11-24T13:48:02.673717Z","steps":["trace[286257082] 'agreement among raft nodes before linearized reading' (duration: 136.690513ms)"],"step_count":1}
{"level":"info","ts":"2025-11-24T13:48:02.673851Z","caller":"traceutil/trace.go:171","msg":"trace[2009156990] transaction","detail":"{read_only:false; response_revision:319; number_of_response:1; }","duration":"168.350659ms","start":"2025-11-24T13:48:02.505481Z","end":"2025-11-24T13:48:02.673832Z","steps":["trace[2009156990] 'process raft request' (duration: 167.775897ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-24T13:48:02.673811Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"132.836489ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/default/default\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-24T13:48:02.673892Z","caller":"traceutil/trace.go:171","msg":"trace[1422014017] range","detail":"{range_begin:/registry/serviceaccounts/default/default; range_end:; response_count:0; response_revision:319; }","duration":"132.929171ms","start":"2025-11-24T13:48:02.54095Z","end":"2025-11-24T13:48:02.673879Z","steps":["trace[1422014017] 'agreement among raft nodes before linearized reading' (duration: 132.804065ms)"],"step_count":1}
==> kernel <==
13:48:36 up 2:30, 0 user, load average: 2.03, 2.80, 1.92
Linux old-k8s-version-513442 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [1dab1df16e654e8d2bf5248f41d4e61a9922afd9e9aa99eb10b51ff76d83fd27] <==
I1124 13:48:08.805828 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1124 13:48:08.806157 1 main.go:139] hostIP = 192.168.94.2
podIP = 192.168.94.2
I1124 13:48:08.806325 1 main.go:148] setting mtu 1500 for CNI
I1124 13:48:08.806347 1 main.go:178] kindnetd IP family: "ipv4"
I1124 13:48:08.806366 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-24T13:48:09Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1124 13:48:09.065201 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1124 13:48:09.065237 1 controller.go:381] "Waiting for informer caches to sync"
I1124 13:48:09.065250 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1124 13:48:09.205219 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1124 13:48:09.465641 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1124 13:48:09.465667 1 metrics.go:72] Registering metrics
I1124 13:48:09.465726 1 controller.go:711] "Syncing nftables rules"
I1124 13:48:19.068504 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1124 13:48:19.068576 1 main.go:301] handling current node
I1124 13:48:29.065440 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}]
I1124 13:48:29.065473 1 main.go:301] handling current node
==> kube-apiserver [b89c098ff2cb630c37cf57f5061688d52a419284b629da3305843a9dee1a5dbb] <==
I1124 13:47:48.951700 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1124 13:47:48.951970 1 apf_controller.go:377] Running API Priority and Fairness config worker
I1124 13:47:48.951984 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1124 13:47:48.952108 1 shared_informer.go:318] Caches are synced for crd-autoregister
I1124 13:47:48.952141 1 aggregator.go:166] initial CRD sync complete...
I1124 13:47:48.952149 1 autoregister_controller.go:141] Starting autoregister controller
I1124 13:47:48.952156 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1124 13:47:48.952165 1 cache.go:39] Caches are synced for autoregister controller
I1124 13:47:48.953986 1 controller.go:624] quota admission added evaluator for: namespaces
I1124 13:47:49.152644 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1124 13:47:49.858204 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1124 13:47:49.862657 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1124 13:47:49.862682 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1124 13:47:50.422560 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1124 13:47:50.472548 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1124 13:47:50.570004 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1124 13:47:50.579741 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.94.2]
I1124 13:47:50.580884 1 controller.go:624] quota admission added evaluator for: endpoints
I1124 13:47:50.586999 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1124 13:47:50.885484 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1124 13:47:51.864040 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1124 13:47:51.877619 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1124 13:47:51.890804 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1124 13:48:04.597347 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1124 13:48:04.651565 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [f7663d3953f0ee1aca9b8f557f4e81791e15502a0a6447b494d2035c4c9b2dfc] <==
I1124 13:48:03.884906 1 shared_informer.go:318] Caches are synced for deployment
I1124 13:48:03.932363 1 shared_informer.go:318] Caches are synced for resource quota
I1124 13:48:03.941297 1 shared_informer.go:318] Caches are synced for resource quota
I1124 13:48:04.243318 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 13:48:04.243355 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1124 13:48:04.258877 1 shared_informer.go:318] Caches are synced for garbage collector
I1124 13:48:04.607851 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-hzfcx"
I1124 13:48:04.611600 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-tpjvb"
I1124 13:48:04.656277 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1124 13:48:04.748220 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-bcd4m"
I1124 13:48:04.756616 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-b5rrl"
I1124 13:48:04.767398 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="111.018323ms"
I1124 13:48:04.782835 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="15.361034ms"
I1124 13:48:04.782967 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="91.68µs"
I1124 13:48:04.940856 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1124 13:48:04.951934 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-bcd4m"
I1124 13:48:04.962829 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="21.807545ms"
I1124 13:48:04.970616 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="7.726674ms"
I1124 13:48:04.970784 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="88.42µs"
I1124 13:48:19.202453 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="121.753µs"
I1124 13:48:19.220547 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="97.147µs"
I1124 13:48:20.044339 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="114.847µs"
I1124 13:48:20.080458 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="7.482374ms"
I1124 13:48:20.080575 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="70.63µs"
I1124 13:48:23.770117 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-proxy [0b87cfcc163e379c4e72aa8c64739d9d13a801c140b5fabe7cbbc11022cfd12a] <==
I1124 13:48:05.277959 1 server_others.go:69] "Using iptables proxy"
I1124 13:48:05.288147 1 node.go:141] Successfully retrieved node IP: 192.168.94.2
I1124 13:48:05.312455 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1124 13:48:05.315014 1 server_others.go:152] "Using iptables Proxier"
I1124 13:48:05.315055 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1124 13:48:05.315064 1 server_others.go:438] "Defaulting to no-op detect-local"
I1124 13:48:05.315106 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1124 13:48:05.315978 1 server.go:846] "Version info" version="v1.28.0"
I1124 13:48:05.316072 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1124 13:48:05.317668 1 config.go:188] "Starting service config controller"
I1124 13:48:05.317713 1 shared_informer.go:311] Waiting for caches to sync for service config
I1124 13:48:05.317754 1 config.go:315] "Starting node config controller"
I1124 13:48:05.317762 1 shared_informer.go:311] Waiting for caches to sync for node config
I1124 13:48:05.318091 1 config.go:97] "Starting endpoint slice config controller"
I1124 13:48:05.318114 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1124 13:48:05.418055 1 shared_informer.go:318] Caches are synced for service config
I1124 13:48:05.418104 1 shared_informer.go:318] Caches are synced for node config
I1124 13:48:05.419230 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [bdd5c20173350449ff23a9ee9a791fe034c518afc7784448209ad9b0a5c32a9f] <==
W1124 13:47:49.773882 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1124 13:47:49.773941 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1124 13:47:49.817194 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1124 13:47:49.817241 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W1124 13:47:49.898465 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1124 13:47:49.898514 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W1124 13:47:49.973231 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1124 13:47:49.973807 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1124 13:47:49.975515 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1124 13:47:49.975624 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W1124 13:47:50.044243 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1124 13:47:50.044284 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1124 13:47:50.065787 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1124 13:47:50.065828 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1124 13:47:50.067051 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1124 13:47:50.067084 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1124 13:47:50.088454 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1124 13:47:50.088492 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W1124 13:47:50.094062 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1124 13:47:50.094103 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1124 13:47:50.176377 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1124 13:47:50.176425 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1124 13:47:50.188050 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1124 13:47:50.188094 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
I1124 13:47:51.410574 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 24 13:48:03 old-k8s-version-513442 kubelet[1521]: I1124 13:48:03.736815 1521 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.621236 1521 topology_manager.go:215] "Topology Admit Handler" podUID="f4ba208a-1a78-46ae-9684-ff3309400852" podNamespace="kube-system" podName="kube-proxy-hzfcx"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.628198 1521 topology_manager.go:215] "Topology Admit Handler" podUID="c7df115a-8394-4f80-ac6c-5b1fc95337b5" podNamespace="kube-system" podName="kindnet-tpjvb"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.701758 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/c7df115a-8394-4f80-ac6c-5b1fc95337b5-xtables-lock\") pod \"kindnet-tpjvb\" (UID: \"c7df115a-8394-4f80-ac6c-5b1fc95337b5\") " pod="kube-system/kindnet-tpjvb"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702003 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cdcx\" (UniqueName: \"kubernetes.io/projected/f4ba208a-1a78-46ae-9684-ff3309400852-kube-api-access-6cdcx\") pod \"kube-proxy-hzfcx\" (UID: \"f4ba208a-1a78-46ae-9684-ff3309400852\") " pod="kube-system/kube-proxy-hzfcx"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702157 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/c7df115a-8394-4f80-ac6c-5b1fc95337b5-cni-cfg\") pod \"kindnet-tpjvb\" (UID: \"c7df115a-8394-4f80-ac6c-5b1fc95337b5\") " pod="kube-system/kindnet-tpjvb"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702290 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c7df115a-8394-4f80-ac6c-5b1fc95337b5-lib-modules\") pod \"kindnet-tpjvb\" (UID: \"c7df115a-8394-4f80-ac6c-5b1fc95337b5\") " pod="kube-system/kindnet-tpjvb"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702379 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnddq\" (UniqueName: \"kubernetes.io/projected/c7df115a-8394-4f80-ac6c-5b1fc95337b5-kube-api-access-cnddq\") pod \"kindnet-tpjvb\" (UID: \"c7df115a-8394-4f80-ac6c-5b1fc95337b5\") " pod="kube-system/kindnet-tpjvb"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702452 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/f4ba208a-1a78-46ae-9684-ff3309400852-kube-proxy\") pod \"kube-proxy-hzfcx\" (UID: \"f4ba208a-1a78-46ae-9684-ff3309400852\") " pod="kube-system/kube-proxy-hzfcx"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702483 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/f4ba208a-1a78-46ae-9684-ff3309400852-xtables-lock\") pod \"kube-proxy-hzfcx\" (UID: \"f4ba208a-1a78-46ae-9684-ff3309400852\") " pod="kube-system/kube-proxy-hzfcx"
Nov 24 13:48:04 old-k8s-version-513442 kubelet[1521]: I1124 13:48:04.702513 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/f4ba208a-1a78-46ae-9684-ff3309400852-lib-modules\") pod \"kube-proxy-hzfcx\" (UID: \"f4ba208a-1a78-46ae-9684-ff3309400852\") " pod="kube-system/kube-proxy-hzfcx"
Nov 24 13:48:06 old-k8s-version-513442 kubelet[1521]: I1124 13:48:06.009542 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-hzfcx" podStartSLOduration=2.00948849 podCreationTimestamp="2025-11-24 13:48:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:48:06.009255456 +0000 UTC m=+14.175181609" watchObservedRunningTime="2025-11-24 13:48:06.00948849 +0000 UTC m=+14.175414641"
Nov 24 13:48:09 old-k8s-version-513442 kubelet[1521]: I1124 13:48:09.017801 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-tpjvb" podStartSLOduration=2.028995374 podCreationTimestamp="2025-11-24 13:48:04 +0000 UTC" firstStartedPulling="2025-11-24 13:48:05.423030434 +0000 UTC m=+13.588956573" lastFinishedPulling="2025-11-24 13:48:08.411777827 +0000 UTC m=+16.577703968" observedRunningTime="2025-11-24 13:48:09.017454231 +0000 UTC m=+17.183380385" watchObservedRunningTime="2025-11-24 13:48:09.017742769 +0000 UTC m=+17.183668923"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.126026 1521 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.199313 1521 topology_manager.go:215] "Topology Admit Handler" podUID="65efb270-100a-4e7c-bee8-24de1df28586" podNamespace="kube-system" podName="storage-provisioner"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.202110 1521 topology_manager.go:215] "Topology Admit Handler" podUID="4e6c9b7c-5f0a-4c60-8197-20e985a07403" podNamespace="kube-system" podName="coredns-5dd5756b68-b5rrl"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.296963 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84ccn\" (UniqueName: \"kubernetes.io/projected/65efb270-100a-4e7c-bee8-24de1df28586-kube-api-access-84ccn\") pod \"storage-provisioner\" (UID: \"65efb270-100a-4e7c-bee8-24de1df28586\") " pod="kube-system/storage-provisioner"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.297219 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/65efb270-100a-4e7c-bee8-24de1df28586-tmp\") pod \"storage-provisioner\" (UID: \"65efb270-100a-4e7c-bee8-24de1df28586\") " pod="kube-system/storage-provisioner"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.297296 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj4xm\" (UniqueName: \"kubernetes.io/projected/4e6c9b7c-5f0a-4c60-8197-20e985a07403-kube-api-access-sj4xm\") pod \"coredns-5dd5756b68-b5rrl\" (UID: \"4e6c9b7c-5f0a-4c60-8197-20e985a07403\") " pod="kube-system/coredns-5dd5756b68-b5rrl"
Nov 24 13:48:19 old-k8s-version-513442 kubelet[1521]: I1124 13:48:19.297327 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e6c9b7c-5f0a-4c60-8197-20e985a07403-config-volume\") pod \"coredns-5dd5756b68-b5rrl\" (UID: \"4e6c9b7c-5f0a-4c60-8197-20e985a07403\") " pod="kube-system/coredns-5dd5756b68-b5rrl"
Nov 24 13:48:20 old-k8s-version-513442 kubelet[1521]: I1124 13:48:20.055454 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-b5rrl" podStartSLOduration=16.055384325 podCreationTimestamp="2025-11-24 13:48:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:48:20.043996165 +0000 UTC m=+28.209922315" watchObservedRunningTime="2025-11-24 13:48:20.055384325 +0000 UTC m=+28.221310494"
Nov 24 13:48:20 old-k8s-version-513442 kubelet[1521]: I1124 13:48:20.072835 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=15.072769008 podCreationTimestamp="2025-11-24 13:48:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 13:48:20.05633827 +0000 UTC m=+28.222264421" watchObservedRunningTime="2025-11-24 13:48:20.072769008 +0000 UTC m=+28.238695171"
Nov 24 13:48:22 old-k8s-version-513442 kubelet[1521]: I1124 13:48:22.349894 1521 topology_manager.go:215] "Topology Admit Handler" podUID="e21ee73b-578f-48c9-826d-ab3b4bbb7871" podNamespace="default" podName="busybox"
Nov 24 13:48:22 old-k8s-version-513442 kubelet[1521]: I1124 13:48:22.417169 1521 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmgg8\" (UniqueName: \"kubernetes.io/projected/e21ee73b-578f-48c9-826d-ab3b4bbb7871-kube-api-access-mmgg8\") pod \"busybox\" (UID: \"e21ee73b-578f-48c9-826d-ab3b4bbb7871\") " pod="default/busybox"
Nov 24 13:48:26 old-k8s-version-513442 kubelet[1521]: I1124 13:48:26.061183 1521 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/busybox" podStartSLOduration=1.287793929 podCreationTimestamp="2025-11-24 13:48:22 +0000 UTC" firstStartedPulling="2025-11-24 13:48:22.783005961 +0000 UTC m=+30.948932098" lastFinishedPulling="2025-11-24 13:48:25.556333595 +0000 UTC m=+33.722259740" observedRunningTime="2025-11-24 13:48:26.061015161 +0000 UTC m=+34.226941311" watchObservedRunningTime="2025-11-24 13:48:26.061121571 +0000 UTC m=+34.227047722"
==> storage-provisioner [c9c8f51adb6bbca8e0f954ad9082c0c66235dce129e152dd682ab69622b44aac] <==
I1124 13:48:19.713946 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1124 13:48:19.725060 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1124 13:48:19.725122 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1124 13:48:19.732798 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1124 13:48:19.733028 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-513442_df294b40-30a6-4b8c-83ff-3d897f2504d8!
I1124 13:48:19.733030 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"938f90ea-7103-4290-984c-f5e7c1aae849", APIVersion:"v1", ResourceVersion:"443", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-513442_df294b40-30a6-4b8c-83ff-3d897f2504d8 became leader
I1124 13:48:19.833675 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-513442_df294b40-30a6-4b8c-83ff-3d897f2504d8!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-513442 -n old-k8s-version-513442
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-513442 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (14.74s)