=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-204346 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [85a1fcd5-ee10-4749-9dec-40efed82eb3e] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [85a1fcd5-ee10-4749-9dec-40efed82eb3e] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 10.002934355s
start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-204346 exec busybox -- /bin/sh -c "ulimit -n"
start_stop_delete_test.go:194: 'ulimit -n' returned 1024, expected 1048576
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-204346
helpers_test.go:243: (dbg) docker inspect old-k8s-version-204346:
-- stdout --
[
{
"Id": "74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716",
"Created": "2025-11-23T08:43:13.914336238Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 255015,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-23T08:43:13.954859222Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:133ca4ac39008d0056ad45d8cb70521d6b70d6e1b8bbff4678fd4b354efbdf70",
"ResolvConfPath": "/var/lib/docker/containers/74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716/hostname",
"HostsPath": "/var/lib/docker/containers/74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716/hosts",
"LogPath": "/var/lib/docker/containers/74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716/74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716-json.log",
"Name": "/old-k8s-version-204346",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-204346:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-204346",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716",
"LowerDir": "/var/lib/docker/overlay2/c1a2c09b9684904e47b03e9569e26d403b09f5d541f2cb59b94c6e639ed9b4e3-init/diff:/var/lib/docker/overlay2/ee04ca8b85d0dedeb02bd9a5189a59a7f53ca89a011d262a78df32fa43bf0598/diff",
"MergedDir": "/var/lib/docker/overlay2/c1a2c09b9684904e47b03e9569e26d403b09f5d541f2cb59b94c6e639ed9b4e3/merged",
"UpperDir": "/var/lib/docker/overlay2/c1a2c09b9684904e47b03e9569e26d403b09f5d541f2cb59b94c6e639ed9b4e3/diff",
"WorkDir": "/var/lib/docker/overlay2/c1a2c09b9684904e47b03e9569e26d403b09f5d541f2cb59b94c6e639ed9b4e3/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "old-k8s-version-204346",
"Source": "/var/lib/docker/volumes/old-k8s-version-204346/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "old-k8s-version-204346",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-204346",
"name.minikube.sigs.k8s.io": "old-k8s-version-204346",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "db03bea2ae002bb3595102e41f0b3c5dd373e7f121cbf490c03f867ac8b10fc2",
"SandboxKey": "/var/run/docker/netns/db03bea2ae00",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33058"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33059"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33062"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33060"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33061"
}
]
},
"Networks": {
"old-k8s-version-204346": {
"IPAMConfig": {
"IPv4Address": "192.168.103.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "2c3268f545c0648cec3972c75676102d767b9cbd699aea51b301ba1de04cad51",
"EndpointID": "a6fed4b2c7bb6c663b8e774c8e64911b07fef263695c45641973d777a7144fb2",
"Gateway": "192.168.103.1",
"IPAddress": "192.168.103.2",
"MacAddress": "1a:83:9b:a0:7e:0e",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-204346",
"74b9ec686773"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-204346 -n old-k8s-version-204346
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-204346 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-204346 logs -n 25: (1.058694638s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬───────────
──────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼───────────
──────────┤
│ start │ -p force-systemd-flag-570956 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-flag-570956 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ start │ -p NoKubernetes-846693 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ ssh │ -p NoKubernetes-846693 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ │
│ ssh │ force-systemd-env-352249 ssh cat /etc/containerd/config.toml │ force-systemd-env-352249 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ delete │ -p force-systemd-env-352249 │ force-systemd-env-352249 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ start │ -p cert-expiration-680868 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-680868 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:42 UTC │
│ ssh │ force-systemd-flag-570956 ssh cat /etc/containerd/config.toml │ force-systemd-flag-570956 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ delete │ -p force-systemd-flag-570956 │ force-systemd-flag-570956 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ start │ -p cert-options-194967 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-194967 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:42 UTC │
│ stop │ -p NoKubernetes-846693 │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ start │ -p NoKubernetes-846693 --driver=docker --container-runtime=containerd │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:42 UTC │
│ ssh │ -p NoKubernetes-846693 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ │
│ delete │ -p NoKubernetes-846693 │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ start │ -p missing-upgrade-231159 --memory=3072 --driver=docker --container-runtime=containerd │ missing-upgrade-231159 │ jenkins │ v1.32.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ ssh │ cert-options-194967 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-194967 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ ssh │ -p cert-options-194967 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-194967 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ delete │ -p cert-options-194967 │ cert-options-194967 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ start │ -p stopped-upgrade-595653 --memory=3072 --vm-driver=docker --container-runtime=containerd │ stopped-upgrade-595653 │ jenkins │ v1.32.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ start │ -p missing-upgrade-231159 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ missing-upgrade-231159 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:43 UTC │
│ stop │ stopped-upgrade-595653 stop │ stopped-upgrade-595653 │ jenkins │ v1.32.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ start │ -p stopped-upgrade-595653 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ stopped-upgrade-595653 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:43 UTC │
│ delete │ -p stopped-upgrade-595653 │ stopped-upgrade-595653 │ jenkins │ v1.37.0 │ 23 Nov 25 08:43 UTC │ 23 Nov 25 08:43 UTC │
│ start │ -p old-k8s-version-204346 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-204346 │ jenkins │ v1.37.0 │ 23 Nov 25 08:43 UTC │ 23 Nov 25 08:43 UTC │
│ delete │ -p missing-upgrade-231159 │ missing-upgrade-231159 │ jenkins │ v1.37.0 │ 23 Nov 25 08:43 UTC │ 23 Nov 25 08:43 UTC │
│ start │ -p no-preload-999106 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-999106 │ jenkins │ v1.37.0 │ 23 Nov 25 08:43 UTC │ │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴───────────
──────────┘
==> Last Start <==
Log file created at: 2025/11/23 08:43:27
Running on machine: ubuntu-20-agent-8
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1123 08:43:27.495640 258086 out.go:360] Setting OutFile to fd 1 ...
I1123 08:43:27.495743 258086 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:43:27.495751 258086 out.go:374] Setting ErrFile to fd 2...
I1123 08:43:27.495755 258086 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:43:27.495953 258086 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21969-13876/.minikube/bin
I1123 08:43:27.496394 258086 out.go:368] Setting JSON to false
I1123 08:43:27.497504 258086 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-8","uptime":5148,"bootTime":1763882259,"procs":304,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1123 08:43:27.497559 258086 start.go:143] virtualization: kvm guest
I1123 08:43:27.499449 258086 out.go:179] * [no-preload-999106] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1123 08:43:27.500767 258086 notify.go:221] Checking for updates...
I1123 08:43:27.500781 258086 out.go:179] - MINIKUBE_LOCATION=21969
I1123 08:43:27.502005 258086 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1123 08:43:27.503191 258086 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21969-13876/kubeconfig
I1123 08:43:27.504274 258086 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21969-13876/.minikube
I1123 08:43:27.505281 258086 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1123 08:43:27.506287 258086 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1123 08:43:27.507765 258086 config.go:182] Loaded profile config "cert-expiration-680868": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 08:43:27.507859 258086 config.go:182] Loaded profile config "kubernetes-upgrade-776670": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 08:43:27.507939 258086 config.go:182] Loaded profile config "old-k8s-version-204346": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1123 08:43:27.508012 258086 driver.go:422] Setting default libvirt URI to qemu:///system
I1123 08:43:27.532390 258086 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1123 08:43:27.532462 258086 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 08:43:27.588863 258086 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:67 OomKillDisable:false NGoroutines:76 SystemTime:2025-11-23 08:43:27.578321532 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652072448 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-8 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[m
ap[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1123 08:43:27.588959 258086 docker.go:319] overlay module found
I1123 08:43:27.590837 258086 out.go:179] * Using the docker driver based on user configuration
I1123 08:43:27.592139 258086 start.go:309] selected driver: docker
I1123 08:43:27.592164 258086 start.go:927] validating driver "docker" against <nil>
I1123 08:43:27.592175 258086 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1123 08:43:27.592773 258086 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 08:43:27.653421 258086 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:64 OomKillDisable:false NGoroutines:76 SystemTime:2025-11-23 08:43:27.643267927 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652072448 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-8 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[m
ap[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1123 08:43:27.653668 258086 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1123 08:43:27.653954 258086 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:43:27.655624 258086 out.go:179] * Using Docker driver with root privileges
I1123 08:43:27.656995 258086 cni.go:84] Creating CNI manager for ""
I1123 08:43:27.657071 258086 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:43:27.657084 258086 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1123 08:43:27.657159 258086 start.go:353] cluster config:
{Name:no-preload-999106 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-999106 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Containe
rRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock:
SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:43:27.658480 258086 out.go:179] * Starting "no-preload-999106" primary control-plane node in "no-preload-999106" cluster
I1123 08:43:27.659678 258086 cache.go:134] Beginning downloading kic base image for docker with containerd
I1123 08:43:27.660749 258086 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1123 08:43:27.661680 258086 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1123 08:43:27.661748 258086 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1123 08:43:27.661771 258086 profile.go:143] Saving config to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/config.json ...
I1123 08:43:27.661801 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/config.json: {Name:mk1854d74e572dba5e78564093e1183622e9aa74 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:27.661927 258086 cache.go:107] acquiring lock: {Name:mka7418a84f8d9aaa890eb7bcafd158f0f845949 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.661970 258086 cache.go:107] acquiring lock: {Name:mke646091201bbef396ff67d16f0cce49990b355 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.661948 258086 cache.go:107] acquiring lock: {Name:mk929bb8e7363fd9f8d602565b078a816979b3d2 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.661979 258086 cache.go:107] acquiring lock: {Name:mk667c169463661b7e999b395cc2d348440d0d0f Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.662058 258086 cache.go:115] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1123 08:43:27.662070 258086 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:27.662087 258086 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:27.662069 258086 cache.go:107] acquiring lock: {Name:mk4a8ffda79c57b59d9ec0be62cf6989cc0b3dc8 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.662104 258086 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:27.662089 258086 cache.go:107] acquiring lock: {Name:mkce85e18a9851767cd13073008b6382df083ea3 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.662080 258086 cache.go:107] acquiring lock: {Name:mk495076811ea27b7ee848ef73ebf58029c788de Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.662200 258086 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:27.662257 258086 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1123 08:43:27.662073 258086 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 113.368µs
I1123 08:43:27.662298 258086 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1123 08:43:27.662298 258086 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:27.662338 258086 cache.go:107] acquiring lock: {Name:mkc513b15aec17d5c3e77aa2e6131827198f8c26 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.662430 258086 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1123 08:43:27.663312 258086 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:27.663446 258086 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1123 08:43:27.663495 258086 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:27.663529 258086 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:27.663560 258086 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1123 08:43:27.663553 258086 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:27.663602 258086 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:27.683115 258086 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1123 08:43:27.683133 258086 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1123 08:43:27.683151 258086 cache.go:243] Successfully downloaded all kic artifacts
I1123 08:43:27.683188 258086 start.go:360] acquireMachinesLock for no-preload-999106: {Name:mk535dea2e363deaa61ac9c5041ac2d499c9efc5 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.683286 258086 start.go:364] duration metric: took 77.877µs to acquireMachinesLock for "no-preload-999106"
I1123 08:43:27.683314 258086 start.go:93] Provisioning new machine with config: &{Name:no-preload-999106 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-999106 Namespace:default APIServerHAVIP: APIServer
Name:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Cust
omQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 08:43:27.683378 258086 start.go:125] createHost starting for "" (driver="docker")
I1123 08:43:23.886201 254114 out.go:252] - Booting up control plane ...
I1123 08:43:23.886286 254114 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1123 08:43:23.886377 254114 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1123 08:43:23.886992 254114 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1123 08:43:23.903197 254114 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1123 08:43:23.904138 254114 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1123 08:43:23.904196 254114 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1123 08:43:24.010365 254114 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1123 08:43:28.512514 254114 kubeadm.go:319] [apiclient] All control plane components are healthy after 4.502224 seconds
I1123 08:43:28.512707 254114 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1123 08:43:28.525209 254114 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1123 08:43:29.051871 254114 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1123 08:43:29.052189 254114 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-204346 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1123 08:43:29.563746 254114 kubeadm.go:319] [bootstrap-token] Using token: kv40xr.vpl4w4wq1fqvcjbv
I1123 08:43:29.565119 254114 out.go:252] - Configuring RBAC rules ...
I1123 08:43:29.565274 254114 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1123 08:43:29.570668 254114 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1123 08:43:29.578425 254114 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1123 08:43:29.581516 254114 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1123 08:43:29.584593 254114 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1123 08:43:29.588395 254114 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1123 08:43:29.599565 254114 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1123 08:43:29.809875 254114 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1123 08:43:29.974613 254114 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1123 08:43:29.975627 254114 kubeadm.go:319]
I1123 08:43:29.975755 254114 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1123 08:43:29.975777 254114 kubeadm.go:319]
I1123 08:43:29.975879 254114 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1123 08:43:29.975889 254114 kubeadm.go:319]
I1123 08:43:29.975929 254114 kubeadm.go:319] mkdir -p $HOME/.kube
I1123 08:43:29.976013 254114 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1123 08:43:29.976095 254114 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1123 08:43:29.976109 254114 kubeadm.go:319]
I1123 08:43:29.976189 254114 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1123 08:43:29.976197 254114 kubeadm.go:319]
I1123 08:43:29.976265 254114 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1123 08:43:29.976274 254114 kubeadm.go:319]
I1123 08:43:29.976365 254114 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1123 08:43:29.976483 254114 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1123 08:43:29.976577 254114 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1123 08:43:29.976584 254114 kubeadm.go:319]
I1123 08:43:29.976725 254114 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1123 08:43:29.976849 254114 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1123 08:43:29.976864 254114 kubeadm.go:319]
I1123 08:43:29.976980 254114 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token kv40xr.vpl4w4wq1fqvcjbv \
I1123 08:43:29.977124 254114 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:5c48a3b11504a9c7a5d242d913eadf6a5354a8cb06c9ffcf8385d22efb04d8fa \
I1123 08:43:29.977157 254114 kubeadm.go:319] --control-plane
I1123 08:43:29.977166 254114 kubeadm.go:319]
I1123 08:43:29.977310 254114 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1123 08:43:29.977319 254114 kubeadm.go:319]
I1123 08:43:29.977452 254114 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token kv40xr.vpl4w4wq1fqvcjbv \
I1123 08:43:29.977614 254114 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:5c48a3b11504a9c7a5d242d913eadf6a5354a8cb06c9ffcf8385d22efb04d8fa
I1123 08:43:29.980159 254114 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1123 08:43:29.980378 254114 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1123 08:43:29.980409 254114 cni.go:84] Creating CNI manager for ""
I1123 08:43:29.980425 254114 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:43:29.984213 254114 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1123 08:43:27.685925 258086 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1123 08:43:27.686123 258086 start.go:159] libmachine.API.Create for "no-preload-999106" (driver="docker")
I1123 08:43:27.686177 258086 client.go:173] LocalClient.Create starting
I1123 08:43:27.686233 258086 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem
I1123 08:43:27.686260 258086 main.go:143] libmachine: Decoding PEM data...
I1123 08:43:27.686276 258086 main.go:143] libmachine: Parsing certificate...
I1123 08:43:27.686316 258086 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21969-13876/.minikube/certs/cert.pem
I1123 08:43:27.686334 258086 main.go:143] libmachine: Decoding PEM data...
I1123 08:43:27.686346 258086 main.go:143] libmachine: Parsing certificate...
I1123 08:43:27.686738 258086 cli_runner.go:164] Run: docker network inspect no-preload-999106 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1123 08:43:27.705175 258086 cli_runner.go:211] docker network inspect no-preload-999106 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1123 08:43:27.705249 258086 network_create.go:284] running [docker network inspect no-preload-999106] to gather additional debugging logs...
I1123 08:43:27.705267 258086 cli_runner.go:164] Run: docker network inspect no-preload-999106
W1123 08:43:27.723756 258086 cli_runner.go:211] docker network inspect no-preload-999106 returned with exit code 1
I1123 08:43:27.723782 258086 network_create.go:287] error running [docker network inspect no-preload-999106]: docker network inspect no-preload-999106: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-999106 not found
I1123 08:43:27.723796 258086 network_create.go:289] output of [docker network inspect no-preload-999106]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-999106 not found
** /stderr **
I1123 08:43:27.723894 258086 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1123 08:43:27.742266 258086 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-5d8b9fdde185 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:76:1f:2b:8a:58:68} reservation:<nil>}
I1123 08:43:27.742817 258086 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-103255eb2e92 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:3a:bb:33:85:24:bc} reservation:<nil>}
I1123 08:43:27.743314 258086 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-fa9f597fddc6 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:b6:bb:01:5e:01:61} reservation:<nil>}
I1123 08:43:27.743832 258086 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-da43b5ed9d8a IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:8e:fe:29:08:73:55} reservation:<nil>}
I1123 08:43:27.744448 258086 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001c01e10}
I1123 08:43:27.744470 258086 network_create.go:124] attempt to create docker network no-preload-999106 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1123 08:43:27.744518 258086 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-999106 no-preload-999106
I1123 08:43:27.793693 258086 network_create.go:108] docker network no-preload-999106 192.168.85.0/24 created
I1123 08:43:27.793726 258086 kic.go:121] calculated static IP "192.168.85.2" for the "no-preload-999106" container
I1123 08:43:27.793798 258086 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1123 08:43:27.815508 258086 cli_runner.go:164] Run: docker volume create no-preload-999106 --label name.minikube.sigs.k8s.io=no-preload-999106 --label created_by.minikube.sigs.k8s.io=true
I1123 08:43:27.836788 258086 oci.go:103] Successfully created a docker volume no-preload-999106
I1123 08:43:27.836929 258086 cli_runner.go:164] Run: docker run --rm --name no-preload-999106-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-999106 --entrypoint /usr/bin/test -v no-preload-999106:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1123 08:43:27.851417 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1123 08:43:27.858908 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1123 08:43:27.860347 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1123 08:43:27.863442 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1123 08:43:27.865314 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1123 08:43:27.878248 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1123 08:43:27.889986 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1123 08:43:27.973948 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 exists
I1123 08:43:27.973981 258086 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1" took 311.645455ms
I1123 08:43:27.973999 258086 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 succeeded
I1123 08:43:28.304822 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 exists
I1123 08:43:28.304856 258086 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.34.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1" took 642.854298ms
I1123 08:43:28.304870 258086 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.34.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 succeeded
I1123 08:43:28.332384 258086 oci.go:107] Successfully prepared a docker volume no-preload-999106
I1123 08:43:28.332436 258086 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
W1123 08:43:28.332544 258086 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1123 08:43:28.332582 258086 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1123 08:43:28.332628 258086 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1123 08:43:28.401507 258086 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-999106 --name no-preload-999106 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-999106 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-999106 --network no-preload-999106 --ip 192.168.85.2 --volume no-preload-999106:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1123 08:43:28.713710 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Running}}
I1123 08:43:28.734068 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:43:28.754748 258086 cli_runner.go:164] Run: docker exec no-preload-999106 stat /var/lib/dpkg/alternatives/iptables
I1123 08:43:28.804354 258086 oci.go:144] the created container "no-preload-999106" has a running status.
I1123 08:43:28.804388 258086 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa...
I1123 08:43:28.861878 258086 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1123 08:43:28.899755 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:43:28.921384 258086 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1123 08:43:28.921408 258086 kic_runner.go:114] Args: [docker exec --privileged no-preload-999106 chown docker:docker /home/docker/.ssh/authorized_keys]
I1123 08:43:28.971140 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:43:28.992543 258086 machine.go:94] provisionDockerMachine start ...
I1123 08:43:28.992659 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:29.017873 258086 main.go:143] libmachine: Using SSH client type: native
I1123 08:43:29.018228 258086 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1123 08:43:29.018252 258086 main.go:143] libmachine: About to run SSH command:
hostname
I1123 08:43:29.019229 258086 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:57704->127.0.0.1:33063: read: connection reset by peer
I1123 08:43:29.339938 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 exists
I1123 08:43:29.339967 258086 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.12.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1" took 1.677878189s
I1123 08:43:29.339993 258086 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.12.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 succeeded
I1123 08:43:29.349964 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 exists
I1123 08:43:29.349997 258086 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.34.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1" took 1.688022096s
I1123 08:43:29.350017 258086 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.34.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 succeeded
I1123 08:43:29.423577 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 exists
I1123 08:43:29.423607 258086 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.34.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1" took 1.761664135s
I1123 08:43:29.423620 258086 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.34.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 succeeded
I1123 08:43:29.487535 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 exists
I1123 08:43:29.487565 258086 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.34.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1" took 1.825655813s
I1123 08:43:29.487576 258086 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.34.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 succeeded
I1123 08:43:29.829693 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 exists
I1123 08:43:29.829727 258086 cache.go:96] cache image "registry.k8s.io/etcd:3.6.4-0" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0" took 2.16770936s
I1123 08:43:29.829741 258086 cache.go:80] save to tar file registry.k8s.io/etcd:3.6.4-0 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 succeeded
I1123 08:43:29.829763 258086 cache.go:87] Successfully saved all images to host disk.
I1123 08:43:32.164591 258086 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-999106
I1123 08:43:32.164618 258086 ubuntu.go:182] provisioning hostname "no-preload-999106"
I1123 08:43:32.164701 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:32.183134 258086 main.go:143] libmachine: Using SSH client type: native
I1123 08:43:32.183339 258086 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1123 08:43:32.183352 258086 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-999106 && echo "no-preload-999106" | sudo tee /etc/hostname
I1123 08:43:32.340889 258086 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-999106
I1123 08:43:32.340971 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:32.359419 258086 main.go:143] libmachine: Using SSH client type: native
I1123 08:43:32.359677 258086 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1123 08:43:32.359696 258086 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-999106' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-999106/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-999106' | sudo tee -a /etc/hosts;
fi
fi
I1123 08:43:29.985991 254114 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1123 08:43:29.990966 254114 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1123 08:43:29.990985 254114 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1123 08:43:30.005005 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1123 08:43:30.649440 254114 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1123 08:43:30.649546 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:30.649581 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-204346 minikube.k8s.io/updated_at=2025_11_23T08_43_30_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=50c3a8a3c03e8a84b6c978a884d21c3de8c6d4f1 minikube.k8s.io/name=old-k8s-version-204346 minikube.k8s.io/primary=true
I1123 08:43:30.659700 254114 ops.go:34] apiserver oom_adj: -16
I1123 08:43:30.729410 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:31.230340 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:31.730113 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:32.230535 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:32.729772 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:32.505327 258086 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:43:32.505361 258086 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21969-13876/.minikube CaCertPath:/home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21969-13876/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21969-13876/.minikube}
I1123 08:43:32.505408 258086 ubuntu.go:190] setting up certificates
I1123 08:43:32.505430 258086 provision.go:84] configureAuth start
I1123 08:43:32.505484 258086 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-999106
I1123 08:43:32.523951 258086 provision.go:143] copyHostCerts
I1123 08:43:32.524019 258086 exec_runner.go:144] found /home/jenkins/minikube-integration/21969-13876/.minikube/key.pem, removing ...
I1123 08:43:32.524033 258086 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21969-13876/.minikube/key.pem
I1123 08:43:32.524115 258086 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21969-13876/.minikube/key.pem (1675 bytes)
I1123 08:43:32.524235 258086 exec_runner.go:144] found /home/jenkins/minikube-integration/21969-13876/.minikube/ca.pem, removing ...
I1123 08:43:32.524248 258086 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21969-13876/.minikube/ca.pem
I1123 08:43:32.524289 258086 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21969-13876/.minikube/ca.pem (1078 bytes)
I1123 08:43:32.524373 258086 exec_runner.go:144] found /home/jenkins/minikube-integration/21969-13876/.minikube/cert.pem, removing ...
I1123 08:43:32.524383 258086 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21969-13876/.minikube/cert.pem
I1123 08:43:32.524416 258086 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21969-13876/.minikube/cert.pem (1123 bytes)
I1123 08:43:32.524499 258086 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21969-13876/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca-key.pem org=jenkins.no-preload-999106 san=[127.0.0.1 192.168.85.2 localhost minikube no-preload-999106]
I1123 08:43:32.587554 258086 provision.go:177] copyRemoteCerts
I1123 08:43:32.587609 258086 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 08:43:32.587655 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:32.605984 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:43:32.708249 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1123 08:43:32.727969 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1123 08:43:32.747752 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1123 08:43:32.766001 258086 provision.go:87] duration metric: took 260.555897ms to configureAuth
I1123 08:43:32.766029 258086 ubuntu.go:206] setting minikube options for container-runtime
I1123 08:43:32.766187 258086 config.go:182] Loaded profile config "no-preload-999106": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 08:43:32.766198 258086 machine.go:97] duration metric: took 3.773633247s to provisionDockerMachine
I1123 08:43:32.766204 258086 client.go:176] duration metric: took 5.080019183s to LocalClient.Create
I1123 08:43:32.766223 258086 start.go:167] duration metric: took 5.080101552s to libmachine.API.Create "no-preload-999106"
I1123 08:43:32.766232 258086 start.go:293] postStartSetup for "no-preload-999106" (driver="docker")
I1123 08:43:32.766242 258086 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 08:43:32.766283 258086 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 08:43:32.766317 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:32.785085 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:43:32.889673 258086 ssh_runner.go:195] Run: cat /etc/os-release
I1123 08:43:32.893433 258086 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1123 08:43:32.893459 258086 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1123 08:43:32.893470 258086 filesync.go:126] Scanning /home/jenkins/minikube-integration/21969-13876/.minikube/addons for local assets ...
I1123 08:43:32.893520 258086 filesync.go:126] Scanning /home/jenkins/minikube-integration/21969-13876/.minikube/files for local assets ...
I1123 08:43:32.893624 258086 filesync.go:149] local asset: /home/jenkins/minikube-integration/21969-13876/.minikube/files/etc/ssl/certs/174422.pem -> 174422.pem in /etc/ssl/certs
I1123 08:43:32.893761 258086 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 08:43:32.902075 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/files/etc/ssl/certs/174422.pem --> /etc/ssl/certs/174422.pem (1708 bytes)
I1123 08:43:32.921898 258086 start.go:296] duration metric: took 155.652278ms for postStartSetup
I1123 08:43:32.922243 258086 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-999106
I1123 08:43:32.940711 258086 profile.go:143] Saving config to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/config.json ...
I1123 08:43:32.940999 258086 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1123 08:43:32.941041 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:32.959311 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:43:33.058968 258086 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1123 08:43:33.063670 258086 start.go:128] duration metric: took 5.380278318s to createHost
I1123 08:43:33.063696 258086 start.go:83] releasing machines lock for "no-preload-999106", held for 5.380396187s
I1123 08:43:33.063776 258086 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-999106
I1123 08:43:33.082497 258086 ssh_runner.go:195] Run: cat /version.json
I1123 08:43:33.082555 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:33.082576 258086 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 08:43:33.082676 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:33.101516 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:43:33.101929 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:43:33.258150 258086 ssh_runner.go:195] Run: systemctl --version
I1123 08:43:33.265003 258086 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 08:43:33.270133 258086 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 08:43:33.270202 258086 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 08:43:33.301093 258086 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1123 08:43:33.301114 258086 start.go:496] detecting cgroup driver to use...
I1123 08:43:33.301140 258086 detect.go:190] detected "systemd" cgroup driver on host os
I1123 08:43:33.301187 258086 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 08:43:33.316380 258086 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:43:33.328339 258086 docker.go:218] disabling cri-docker service (if available) ...
I1123 08:43:33.328388 258086 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1123 08:43:33.344573 258086 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1123 08:43:33.362321 258086 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1123 08:43:33.449438 258086 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1123 08:43:33.532610 258086 docker.go:234] disabling docker service ...
I1123 08:43:33.532689 258086 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1123 08:43:33.551827 258086 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1123 08:43:33.564985 258086 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1123 08:43:33.650121 258086 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1123 08:43:33.736173 258086 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 08:43:33.749245 258086 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:43:33.764351 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1123 08:43:33.774567 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 08:43:33.784258 258086 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1123 08:43:33.784327 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1123 08:43:33.794411 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:43:33.804033 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 08:43:33.812857 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:43:33.821787 258086 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 08:43:33.829930 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 08:43:33.839002 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 08:43:33.847926 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1123 08:43:33.856822 258086 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 08:43:33.864542 258086 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 08:43:33.871885 258086 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:43:33.950854 258086 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1123 08:43:34.024458 258086 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1123 08:43:34.024534 258086 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1123 08:43:34.029083 258086 start.go:564] Will wait 60s for crictl version
I1123 08:43:34.029145 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.032799 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1123 08:43:34.057987 258086 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1123 08:43:34.058049 258086 ssh_runner.go:195] Run: containerd --version
I1123 08:43:34.078381 258086 ssh_runner.go:195] Run: containerd --version
I1123 08:43:34.100680 258086 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
I1123 08:43:36.163341 206485 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.069407293s)
W1123 08:43:36.163379 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Unable to connect to the server: net/http: TLS handshake timeout
output:
** stderr **
Unable to connect to the server: net/http: TLS handshake timeout
** /stderr **
I1123 08:43:36.163391 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:36.163401 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:36.196694 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:36.196725 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:36.230996 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:36.231018 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:36.266205 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:36.266235 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:34.101669 258086 cli_runner.go:164] Run: docker network inspect no-preload-999106 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1123 08:43:34.119192 258086 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1123 08:43:34.123375 258086 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:43:34.134033 258086 kubeadm.go:884] updating cluster {Name:no-preload-999106 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-999106 Namespace:default APIServerHAVIP: APIServerName:minikubeCA API
ServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuF
irmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1123 08:43:34.134129 258086 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1123 08:43:34.134170 258086 ssh_runner.go:195] Run: sudo crictl images --output json
I1123 08:43:34.159373 258086 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.34.1". assuming images are not preloaded.
I1123 08:43:34.159392 258086 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.34.1 registry.k8s.io/kube-controller-manager:v1.34.1 registry.k8s.io/kube-scheduler:v1.34.1 registry.k8s.io/kube-proxy:v1.34.1 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.6.4-0 registry.k8s.io/coredns/coredns:v1.12.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1123 08:43:34.159438 258086 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:34.159452 258086 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.159485 258086 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.159504 258086 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.159534 258086 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.159485 258086 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1123 08:43:34.159583 258086 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.159658 258086 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.161000 258086 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.161332 258086 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.161540 258086 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:34.161951 258086 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.162137 258086 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.162179 258086 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.162238 258086 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1123 08:43:34.162370 258086 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.303423 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969"
I1123 08:43:34.303507 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.304294 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.6.4-0" and sha "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115"
I1123 08:43:34.304346 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.325396 258086 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115" in container runtime
I1123 08:43:34.325443 258086 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.325489 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.325396 258086 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969" in container runtime
I1123 08:43:34.325524 258086 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.325560 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.329408 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.330479 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.332092 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1123 08:43:34.332130 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1123 08:43:34.334793 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7"
I1123 08:43:34.334839 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.334892 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813"
I1123 08:43:34.334947 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.359405 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.359448 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.359453 258086 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1123 08:43:34.359480 258086 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1123 08:43:34.359511 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.359927 258086 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7" in container runtime
I1123 08:43:34.359953 258086 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.359986 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.362071 258086 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813" in container runtime
I1123 08:43:34.362107 258086 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.362148 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.386773 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.388038 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.388124 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1123 08:43:34.388148 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.388227 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.402862 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f"
I1123 08:43:34.402936 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.406588 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97"
I1123 08:43:34.406683 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.419900 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1123 08:43:34.420019 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1123 08:43:34.422632 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1123 08:43:34.422820 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1123 08:43:34.422852 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1123 08:43:34.422867 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.422905 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.432625 258086 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f" in container runtime
I1123 08:43:34.432698 258086 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.432750 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.435170 258086 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97" in container runtime
I1123 08:43:34.435213 258086 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.435236 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1123 08:43:34.435258 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.435263 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (22394368 bytes)
I1123 08:43:34.468602 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1123 08:43:34.468621 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1123 08:43:34.468654 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (74320896 bytes)
I1123 08:43:34.468703 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.468726 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.468757 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.468795 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.563471 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1123 08:43:34.563530 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.563577 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1123 08:43:34.563667 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1123 08:43:34.563682 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.563581 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1123 08:43:34.563706 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1123 08:43:34.563755 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1123 08:43:34.626877 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1123 08:43:34.626895 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1123 08:43:34.626913 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (17396736 bytes)
I1123 08:43:34.626923 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1123 08:43:34.626927 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1123 08:43:34.626943 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (25966080 bytes)
I1123 08:43:34.626974 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.627042 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.685224 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1123 08:43:34.685246 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1123 08:43:34.685326 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1123 08:43:34.685340 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1123 08:43:34.700613 258086 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1123 08:43:34.700688 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1123 08:43:34.713376 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1123 08:43:34.713409 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1123 08:43:34.713407 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (27073024 bytes)
I1123 08:43:34.713434 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (22831104 bytes)
I1123 08:43:34.840943 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
I1123 08:43:34.885583 258086 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1123 08:43:34.885674 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1123 08:43:35.489785 258086 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1123 08:43:35.489853 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:36.097868 258086 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.212165923s)
I1123 08:43:36.097898 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1123 08:43:36.097915 258086 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1123 08:43:36.097931 258086 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1123 08:43:36.097957 258086 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:36.097992 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
I1123 08:43:36.098005 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:37.105043 258086 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.007027025s)
I1123 08:43:37.105070 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1123 08:43:37.105098 258086 ssh_runner.go:235] Completed: which crictl: (1.007074313s)
I1123 08:43:37.105104 258086 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1123 08:43:37.105153 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:37.105159 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1123 08:43:37.133915 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:33.230087 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:33.729573 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:34.229556 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:34.729739 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:35.229458 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:35.729622 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:36.229768 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:36.730508 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:37.229765 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:37.729788 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:38.229952 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:38.730333 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:39.229833 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:39.729862 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:40.229901 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:40.729885 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:41.230479 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:41.730515 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:42.230247 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:42.326336 254114 kubeadm.go:1114] duration metric: took 11.676850942s to wait for elevateKubeSystemPrivileges
I1123 08:43:42.326376 254114 kubeadm.go:403] duration metric: took 21.509472133s to StartCluster
I1123 08:43:42.326398 254114 settings.go:142] acquiring lock: {Name:mk2c00a8b461754a49d5c7fd5af34c7d1005153a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:42.326470 254114 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21969-13876/kubeconfig
I1123 08:43:42.328223 254114 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/kubeconfig: {Name:mk636046b7146fd65b5638a6d549b76e61f7f055 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:42.328482 254114 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1123 08:43:42.328500 254114 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 08:43:42.328566 254114 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1123 08:43:42.328729 254114 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-204346"
I1123 08:43:42.328754 254114 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-204346"
I1123 08:43:42.328778 254114 config.go:182] Loaded profile config "old-k8s-version-204346": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1123 08:43:42.328793 254114 host.go:66] Checking if "old-k8s-version-204346" exists ...
I1123 08:43:42.328837 254114 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-204346"
I1123 08:43:42.328856 254114 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-204346"
I1123 08:43:42.329183 254114 cli_runner.go:164] Run: docker container inspect old-k8s-version-204346 --format={{.State.Status}}
I1123 08:43:42.329321 254114 cli_runner.go:164] Run: docker container inspect old-k8s-version-204346 --format={{.State.Status}}
I1123 08:43:42.331021 254114 out.go:179] * Verifying Kubernetes components...
I1123 08:43:42.332482 254114 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:43:42.357866 254114 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:38.827550 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:38.827977 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:38.828023 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:38.828070 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:38.854573 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:38.854598 206485 cri.go:89] found id: "89f5abdf45afb9ff15a0744d6b71c9196e67d8f1e07dbde6c14130fa812cd030"
I1123 08:43:38.854603 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:38.854606 206485 cri.go:89] found id: ""
I1123 08:43:38.854613 206485 logs.go:282] 3 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 89f5abdf45afb9ff15a0744d6b71c9196e67d8f1e07dbde6c14130fa812cd030 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:38.854688 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.858901 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.862744 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.866475 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:38.866533 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:38.892493 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:38.892520 206485 cri.go:89] found id: ""
I1123 08:43:38.892528 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:38.892575 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.896728 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:38.896790 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:38.923307 206485 cri.go:89] found id: ""
I1123 08:43:38.923331 206485 logs.go:282] 0 containers: []
W1123 08:43:38.923340 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:38.923346 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:38.923392 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:38.949371 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:38.949396 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:38.949401 206485 cri.go:89] found id: ""
I1123 08:43:38.949407 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:38.949452 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.953461 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.957266 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:38.957315 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:38.984054 206485 cri.go:89] found id: ""
I1123 08:43:38.984077 206485 logs.go:282] 0 containers: []
W1123 08:43:38.984084 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:38.984090 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:38.984144 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:39.014867 206485 cri.go:89] found id: "7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:39.014894 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:39.014900 206485 cri.go:89] found id: ""
I1123 08:43:39.014909 206485 logs.go:282] 2 containers: [7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:39.014988 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:39.019876 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:39.024471 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:39.024545 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:39.056343 206485 cri.go:89] found id: ""
I1123 08:43:39.056370 206485 logs.go:282] 0 containers: []
W1123 08:43:39.056382 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:39.056390 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:39.056447 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:39.087173 206485 cri.go:89] found id: ""
I1123 08:43:39.087200 206485 logs.go:282] 0 containers: []
W1123 08:43:39.087209 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:39.087218 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:39.087230 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:39.143340 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:39.143373 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:39.182502 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:39.182538 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:39.220490 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:39.220526 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:39.279713 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:39.279751 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:39.296632 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:39.296672 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:39.369445 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:39.369477 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:39.369493 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:39.412743 206485 logs.go:123] Gathering logs for kube-controller-manager [7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb] ...
I1123 08:43:39.412782 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:39.445988 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:39.446015 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:39.482074 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:39.482110 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:39.578994 206485 logs.go:123] Gathering logs for kube-apiserver [89f5abdf45afb9ff15a0744d6b71c9196e67d8f1e07dbde6c14130fa812cd030] ...
I1123 08:43:39.579036 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89f5abdf45afb9ff15a0744d6b71c9196e67d8f1e07dbde6c14130fa812cd030"
I1123 08:43:39.619624 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:39.619684 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:39.661136 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:39.661175 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:42.204267 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:42.204712 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:42.204771 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:42.204826 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:42.232709 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:42.232730 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:42.232735 206485 cri.go:89] found id: ""
I1123 08:43:42.232744 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:42.232799 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.236622 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.240968 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:42.241028 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:42.281849 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:42.281877 206485 cri.go:89] found id: ""
I1123 08:43:42.281885 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:42.281942 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.287991 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:42.288063 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:42.327625 206485 cri.go:89] found id: ""
I1123 08:43:42.327669 206485 logs.go:282] 0 containers: []
W1123 08:43:42.327679 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:42.327687 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:42.327768 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:39.015203 258086 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (1.910026064s)
I1123 08:43:39.015228 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1123 08:43:39.015249 258086 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1123 08:43:39.015286 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1123 08:43:39.015301 258086 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.881356677s)
I1123 08:43:39.015367 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:39.981839 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1123 08:43:39.981862 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1123 08:43:39.981901 258086 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1123 08:43:39.981948 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1123 08:43:39.981955 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1123 08:43:39.985933 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1123 08:43:39.985965 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
I1123 08:43:41.077380 258086 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.095406466s)
I1123 08:43:41.077408 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1123 08:43:41.077435 258086 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1123 08:43:41.077497 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1123 08:43:42.358205 254114 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-204346"
I1123 08:43:42.358246 254114 host.go:66] Checking if "old-k8s-version-204346" exists ...
I1123 08:43:42.358752 254114 cli_runner.go:164] Run: docker container inspect old-k8s-version-204346 --format={{.State.Status}}
I1123 08:43:42.359206 254114 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:43:42.359225 254114 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1123 08:43:42.359285 254114 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-204346
I1123 08:43:42.389614 254114 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1123 08:43:42.389635 254114 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1123 08:43:42.389707 254114 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-204346
I1123 08:43:42.391185 254114 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/old-k8s-version-204346/id_rsa Username:docker}
I1123 08:43:42.422459 254114 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/old-k8s-version-204346/id_rsa Username:docker}
I1123 08:43:42.449217 254114 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1123 08:43:42.517611 254114 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:43:42.534960 254114 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:43:42.564953 254114 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1123 08:43:42.780756 254114 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
I1123 08:43:42.781954 254114 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-204346" to be "Ready" ...
I1123 08:43:43.034443 254114 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1123 08:43:43.035744 254114 addons.go:530] duration metric: took 707.164659ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1123 08:43:42.368955 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:42.368979 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:42.368985 206485 cri.go:89] found id: ""
I1123 08:43:42.368996 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:42.370472 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.378043 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.388658 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:42.388749 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:42.429522 206485 cri.go:89] found id: ""
I1123 08:43:42.429549 206485 logs.go:282] 0 containers: []
W1123 08:43:42.429559 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:42.429566 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:42.429632 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:42.469043 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:42.469070 206485 cri.go:89] found id: "7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:42.469076 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:42.469081 206485 cri.go:89] found id: ""
I1123 08:43:42.469089 206485 logs.go:282] 3 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:42.469144 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.475315 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.481874 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.488696 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:42.488921 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:42.533856 206485 cri.go:89] found id: ""
I1123 08:43:42.533914 206485 logs.go:282] 0 containers: []
W1123 08:43:42.533926 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:42.533934 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:42.534029 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:42.577521 206485 cri.go:89] found id: ""
I1123 08:43:42.577543 206485 logs.go:282] 0 containers: []
W1123 08:43:42.577550 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:42.577559 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:42.577568 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:42.665576 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:42.665601 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:42.665622 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:42.723908 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:42.723945 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:42.766588 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:42.766618 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:42.815960 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:42.816050 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:42.836362 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:42.836393 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:42.883211 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:42.883249 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:42.925983 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:42.926057 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:43.002532 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:43.002565 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:43.048891 206485 logs.go:123] Gathering logs for kube-controller-manager [7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb] ...
I1123 08:43:43.048923 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:43.080573 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:43.080606 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:43.145471 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:43.145510 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:43.182994 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:43.183035 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:45.803715 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:45.804092 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:45.804151 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:45.804211 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:45.842142 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:45.842161 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:45.842165 206485 cri.go:89] found id: ""
I1123 08:43:45.842172 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:45.842223 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:45.846225 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:45.850730 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:45.850797 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:45.879479 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:45.879506 206485 cri.go:89] found id: ""
I1123 08:43:45.879515 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:45.879576 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:45.884738 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:45.884801 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:45.916040 206485 cri.go:89] found id: ""
I1123 08:43:45.916069 206485 logs.go:282] 0 containers: []
W1123 08:43:45.916080 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:45.916088 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:45.916155 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:45.947206 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:45.947237 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:45.947242 206485 cri.go:89] found id: ""
I1123 08:43:45.947252 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:45.947308 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:45.952246 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:45.956172 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:45.956233 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:45.986919 206485 cri.go:89] found id: ""
I1123 08:43:45.986945 206485 logs.go:282] 0 containers: []
W1123 08:43:45.986956 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:45.986964 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:45.987017 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:46.019241 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:46.019269 206485 cri.go:89] found id: "7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:46.019273 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:46.019278 206485 cri.go:89] found id: ""
I1123 08:43:46.019286 206485 logs.go:282] 3 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:46.019345 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:46.024190 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:46.028847 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:46.033363 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:46.033436 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:46.067781 206485 cri.go:89] found id: ""
I1123 08:43:46.067808 206485 logs.go:282] 0 containers: []
W1123 08:43:46.067819 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:46.067827 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:46.067885 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:46.100053 206485 cri.go:89] found id: ""
I1123 08:43:46.100084 206485 logs.go:282] 0 containers: []
W1123 08:43:46.100094 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:46.100107 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:46.100122 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:46.146426 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:46.146456 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:46.208332 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:46.208375 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:46.247193 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:46.247229 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:46.264714 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:46.264742 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:46.336341 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:46.336363 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:46.336376 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:46.379827 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:46.379866 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:46.425899 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:46.425925 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:46.491769 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:46.491805 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:46.523775 206485 logs.go:123] Gathering logs for kube-controller-manager [7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb] ...
I1123 08:43:46.523805 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:46.555025 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:46.555060 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:46.592667 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:46.592709 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:46.691047 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:46.691081 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:43.958800 258086 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (2.881269634s)
I1123 08:43:43.958835 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 from cache
I1123 08:43:43.958864 258086 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1123 08:43:43.958908 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1123 08:43:44.336453 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1123 08:43:44.336514 258086 cache_images.go:125] Successfully loaded all cached images
I1123 08:43:44.336522 258086 cache_images.go:94] duration metric: took 10.177118s to LoadCachedImages
I1123 08:43:44.336535 258086 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.34.1 containerd true true} ...
I1123 08:43:44.336675 258086 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-999106 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-999106 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1123 08:43:44.336740 258086 ssh_runner.go:195] Run: sudo crictl info
I1123 08:43:44.361999 258086 cni.go:84] Creating CNI manager for ""
I1123 08:43:44.362021 258086 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:43:44.362037 258086 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1123 08:43:44.362060 258086 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-999106 NodeName:no-preload-999106 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPa
th:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1123 08:43:44.362197 258086 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "no-preload-999106"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.85.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1123 08:43:44.362266 258086 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1123 08:43:44.371147 258086 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1123 08:43:44.371205 258086 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1123 08:43:44.379477 258086 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
I1123 08:43:44.379559 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1123 08:43:44.379560 258086 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet.sha256 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/linux/amd64/v1.34.1/kubelet
I1123 08:43:44.379590 258086 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm.sha256 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/linux/amd64/v1.34.1/kubeadm
I1123 08:43:44.384906 258086 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1123 08:43:44.384935 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/linux/amd64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (60559544 bytes)
I1123 08:43:45.307760 258086 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 08:43:45.321272 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1123 08:43:45.325776 258086 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1123 08:43:45.325807 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/linux/amd64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (59195684 bytes)
I1123 08:43:45.440984 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1123 08:43:45.448490 258086 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1123 08:43:45.448546 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/linux/amd64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (74027192 bytes)
I1123 08:43:45.718942 258086 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1123 08:43:45.729752 258086 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (321 bytes)
I1123 08:43:45.746904 258086 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1123 08:43:45.764606 258086 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2229 bytes)
I1123 08:43:45.779438 258086 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1123 08:43:45.783637 258086 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:43:45.795787 258086 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:43:45.901866 258086 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:43:45.931680 258086 certs.go:69] Setting up /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106 for IP: 192.168.85.2
I1123 08:43:45.931702 258086 certs.go:195] generating shared ca certs ...
I1123 08:43:45.931722 258086 certs.go:227] acquiring lock for ca certs: {Name:mk376e2c25eb30d8b09b93cb4624441e819bcc8f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:45.931883 258086 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21969-13876/.minikube/ca.key
I1123 08:43:45.931922 258086 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21969-13876/.minikube/proxy-client-ca.key
I1123 08:43:45.931931 258086 certs.go:257] generating profile certs ...
I1123 08:43:45.932023 258086 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.key
I1123 08:43:45.932046 258086 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.crt with IP's: []
I1123 08:43:46.076820 258086 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.crt ...
I1123 08:43:46.076852 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.crt: {Name:mk264e21cffc1d235a0a5153e1f533874608a488 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.077062 258086 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.key ...
I1123 08:43:46.077094 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.key: {Name:mk09f5a31cd584eb4ea102a803f662bacda0e612 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.077204 258086 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key.ff765c4c
I1123 08:43:46.077226 258086 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt.ff765c4c with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1123 08:43:46.147038 258086 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt.ff765c4c ...
I1123 08:43:46.147076 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt.ff765c4c: {Name:mk2b60ecfaddc28f6e9e91bd0ff2b48be7ad7023 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.147257 258086 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key.ff765c4c ...
I1123 08:43:46.147277 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key.ff765c4c: {Name:mk8ce7b23d7c04fba7d8d30f580f5ae25a8eaa1e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.147393 258086 certs.go:382] copying /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt.ff765c4c -> /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt
I1123 08:43:46.147504 258086 certs.go:386] copying /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key.ff765c4c -> /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key
I1123 08:43:46.147597 258086 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.key
I1123 08:43:46.147614 258086 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.crt with IP's: []
I1123 08:43:46.188254 258086 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.crt ...
I1123 08:43:46.188285 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.crt: {Name:mkce831c55c8c6f96bdb743bd92d80212f28ceec Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.188486 258086 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.key ...
I1123 08:43:46.188506 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.key: {Name:mk2b9a4c76ac3acf445fdcb1e14850de2c1a5507 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.188762 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/17442.pem (1338 bytes)
W1123 08:43:46.188820 258086 certs.go:480] ignoring /home/jenkins/minikube-integration/21969-13876/.minikube/certs/17442_empty.pem, impossibly tiny 0 bytes
I1123 08:43:46.188836 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca-key.pem (1675 bytes)
I1123 08:43:46.188874 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem (1078 bytes)
I1123 08:43:46.188907 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/cert.pem (1123 bytes)
I1123 08:43:46.188942 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/key.pem (1675 bytes)
I1123 08:43:46.189009 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/files/etc/ssl/certs/174422.pem (1708 bytes)
I1123 08:43:46.189889 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1123 08:43:46.212738 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1123 08:43:46.235727 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1123 08:43:46.259309 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1123 08:43:46.282164 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1123 08:43:46.305443 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1123 08:43:46.328998 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1123 08:43:46.351947 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1123 08:43:46.375511 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1123 08:43:46.401909 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/certs/17442.pem --> /usr/share/ca-certificates/17442.pem (1338 bytes)
I1123 08:43:46.424180 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/files/etc/ssl/certs/174422.pem --> /usr/share/ca-certificates/174422.pem (1708 bytes)
I1123 08:43:46.445575 258086 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1123 08:43:46.461580 258086 ssh_runner.go:195] Run: openssl version
I1123 08:43:46.468524 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1123 08:43:46.477534 258086 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1123 08:43:46.482510 258086 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 08:11 /usr/share/ca-certificates/minikubeCA.pem
I1123 08:43:46.482577 258086 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1123 08:43:46.523991 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1123 08:43:46.535125 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/17442.pem && ln -fs /usr/share/ca-certificates/17442.pem /etc/ssl/certs/17442.pem"
I1123 08:43:46.546052 258086 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/17442.pem
I1123 08:43:46.552569 258086 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:16 /usr/share/ca-certificates/17442.pem
I1123 08:43:46.552702 258086 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/17442.pem
I1123 08:43:46.600806 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/17442.pem /etc/ssl/certs/51391683.0"
I1123 08:43:46.610524 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/174422.pem && ln -fs /usr/share/ca-certificates/174422.pem /etc/ssl/certs/174422.pem"
I1123 08:43:46.621451 258086 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/174422.pem
I1123 08:43:46.625905 258086 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:16 /usr/share/ca-certificates/174422.pem
I1123 08:43:46.625966 258086 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/174422.pem
I1123 08:43:46.663055 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/174422.pem /etc/ssl/certs/3ec20f2e.0"
I1123 08:43:46.672614 258086 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1123 08:43:46.676799 258086 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1123 08:43:46.676865 258086 kubeadm.go:401] StartCluster: {Name:no-preload-999106 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-999106 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APISer
verNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirm
warePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:43:46.676948 258086 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1123 08:43:46.677027 258086 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1123 08:43:46.706515 258086 cri.go:89] found id: ""
I1123 08:43:46.706599 258086 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1123 08:43:46.715791 258086 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1123 08:43:46.725599 258086 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1123 08:43:46.725695 258086 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1123 08:43:46.734727 258086 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1123 08:43:46.734752 258086 kubeadm.go:158] found existing configuration files:
I1123 08:43:46.734794 258086 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1123 08:43:46.743841 258086 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1123 08:43:46.743892 258086 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1123 08:43:46.752521 258086 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1123 08:43:46.761347 258086 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1123 08:43:46.761400 258086 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1123 08:43:46.769196 258086 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1123 08:43:46.777174 258086 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1123 08:43:46.777227 258086 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1123 08:43:46.784869 258086 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1123 08:43:46.793707 258086 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1123 08:43:46.793768 258086 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1123 08:43:46.801586 258086 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1123 08:43:46.858285 258086 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1123 08:43:46.916186 258086 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1123 08:43:43.286172 254114 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-204346" context rescaled to 1 replicas
W1123 08:43:44.785588 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
W1123 08:43:46.785746 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
I1123 08:43:49.228668 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:49.229070 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:49.229121 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:49.229170 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:49.256973 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:49.256994 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:49.257000 206485 cri.go:89] found id: ""
I1123 08:43:49.257008 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:49.257070 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.261237 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.264766 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:49.264830 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:49.290113 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:49.290135 206485 cri.go:89] found id: ""
I1123 08:43:49.290145 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:49.290199 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.293989 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:49.294053 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:49.320161 206485 cri.go:89] found id: ""
I1123 08:43:49.320191 206485 logs.go:282] 0 containers: []
W1123 08:43:49.320202 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:49.320210 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:49.320264 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:49.347363 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:49.347384 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:49.347391 206485 cri.go:89] found id: ""
I1123 08:43:49.347407 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:49.347464 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.351525 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.355374 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:49.355433 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:49.382984 206485 cri.go:89] found id: ""
I1123 08:43:49.383010 206485 logs.go:282] 0 containers: []
W1123 08:43:49.383020 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:49.383028 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:49.383086 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:49.409377 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:49.409402 206485 cri.go:89] found id: "7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:49.409408 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:49.409413 206485 cri.go:89] found id: ""
I1123 08:43:49.409421 206485 logs.go:282] 3 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:49.409468 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.413850 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.417701 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.421307 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:49.421373 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:49.447409 206485 cri.go:89] found id: ""
I1123 08:43:49.447433 206485 logs.go:282] 0 containers: []
W1123 08:43:49.447444 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:49.447451 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:49.447512 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:49.474526 206485 cri.go:89] found id: ""
I1123 08:43:49.474554 206485 logs.go:282] 0 containers: []
W1123 08:43:49.474562 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:49.474572 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:49.474580 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:49.566947 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:49.566990 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:49.581192 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:49.581218 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:49.640574 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:49.640596 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:49.640610 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:49.676070 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:49.676097 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:49.710524 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:49.710555 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:49.785389 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:49.785422 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:49.819651 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:49.819677 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:49.847192 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:49.847216 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:49.878622 206485 logs.go:123] Gathering logs for kube-controller-manager [7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb] ...
I1123 08:43:49.878674 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:49.904924 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:49.904958 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:49.937225 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:49.937252 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:49.987441 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:49.987483 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
W1123 08:43:49.285708 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
W1123 08:43:51.285827 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
I1123 08:43:56.990600 258086 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1123 08:43:56.990724 258086 kubeadm.go:319] [preflight] Running pre-flight checks
I1123 08:43:56.990889 258086 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1123 08:43:56.990976 258086 kubeadm.go:319] [0;37mKERNEL_VERSION[0m: [0;32m6.8.0-1044-gcp[0m
I1123 08:43:56.991027 258086 kubeadm.go:319] [0;37mOS[0m: [0;32mLinux[0m
I1123 08:43:56.991098 258086 kubeadm.go:319] [0;37mCGROUPS_CPU[0m: [0;32menabled[0m
I1123 08:43:56.991170 258086 kubeadm.go:319] [0;37mCGROUPS_CPUSET[0m: [0;32menabled[0m
I1123 08:43:56.991327 258086 kubeadm.go:319] [0;37mCGROUPS_DEVICES[0m: [0;32menabled[0m
I1123 08:43:56.991401 258086 kubeadm.go:319] [0;37mCGROUPS_FREEZER[0m: [0;32menabled[0m
I1123 08:43:56.991513 258086 kubeadm.go:319] [0;37mCGROUPS_MEMORY[0m: [0;32menabled[0m
I1123 08:43:56.991594 258086 kubeadm.go:319] [0;37mCGROUPS_PIDS[0m: [0;32menabled[0m
I1123 08:43:56.991696 258086 kubeadm.go:319] [0;37mCGROUPS_HUGETLB[0m: [0;32menabled[0m
I1123 08:43:56.991760 258086 kubeadm.go:319] [0;37mCGROUPS_IO[0m: [0;32menabled[0m
I1123 08:43:56.991928 258086 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1123 08:43:56.992079 258086 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1123 08:43:56.992203 258086 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1123 08:43:56.992277 258086 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1123 08:43:56.993629 258086 out.go:252] - Generating certificates and keys ...
I1123 08:43:56.993773 258086 kubeadm.go:319] [certs] Using existing ca certificate authority
I1123 08:43:56.993882 258086 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1123 08:43:56.993978 258086 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1123 08:43:56.994054 258086 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1123 08:43:56.994139 258086 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1123 08:43:56.994210 258086 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1123 08:43:56.994287 258086 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1123 08:43:56.994448 258086 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-999106] and IPs [192.168.85.2 127.0.0.1 ::1]
I1123 08:43:56.994523 258086 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1123 08:43:56.994701 258086 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-999106] and IPs [192.168.85.2 127.0.0.1 ::1]
I1123 08:43:56.994808 258086 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1123 08:43:56.994907 258086 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1123 08:43:56.994974 258086 kubeadm.go:319] [certs] Generating "sa" key and public key
I1123 08:43:56.995052 258086 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1123 08:43:56.995136 258086 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1123 08:43:56.995230 258086 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1123 08:43:56.995314 258086 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1123 08:43:56.995407 258086 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1123 08:43:56.995507 258086 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1123 08:43:56.995596 258086 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1123 08:43:56.995670 258086 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1123 08:43:56.998197 258086 out.go:252] - Booting up control plane ...
I1123 08:43:56.998282 258086 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1123 08:43:56.998367 258086 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1123 08:43:56.998479 258086 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1123 08:43:56.998614 258086 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1123 08:43:56.998760 258086 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1123 08:43:56.998861 258086 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1123 08:43:56.998949 258086 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1123 08:43:56.998984 258086 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1123 08:43:56.999108 258086 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1123 08:43:56.999224 258086 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1123 08:43:56.999284 258086 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 501.962401ms
I1123 08:43:56.999376 258086 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1123 08:43:56.999453 258086 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.85.2:8443/livez
I1123 08:43:56.999531 258086 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1123 08:43:56.999598 258086 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1123 08:43:56.999680 258086 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.69972236s
I1123 08:43:56.999756 258086 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 1.979262438s
I1123 08:43:56.999857 258086 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 3.502236354s
I1123 08:43:56.999983 258086 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1123 08:43:57.000181 258086 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1123 08:43:57.000269 258086 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1123 08:43:57.000528 258086 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-999106 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1123 08:43:57.000596 258086 kubeadm.go:319] [bootstrap-token] Using token: augmq1.wtvrtjusohbhz9fp
I1123 08:43:57.002234 258086 out.go:252] - Configuring RBAC rules ...
I1123 08:43:57.002330 258086 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1123 08:43:57.002408 258086 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1123 08:43:57.002539 258086 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1123 08:43:57.002709 258086 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1123 08:43:57.002823 258086 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1123 08:43:57.002898 258086 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1123 08:43:57.003040 258086 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1123 08:43:57.003091 258086 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1123 08:43:57.003157 258086 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1123 08:43:57.003173 258086 kubeadm.go:319]
I1123 08:43:57.003224 258086 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1123 08:43:57.003229 258086 kubeadm.go:319]
I1123 08:43:57.003293 258086 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1123 08:43:57.003299 258086 kubeadm.go:319]
I1123 08:43:57.003325 258086 kubeadm.go:319] mkdir -p $HOME/.kube
I1123 08:43:57.003380 258086 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1123 08:43:57.003424 258086 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1123 08:43:57.003429 258086 kubeadm.go:319]
I1123 08:43:57.003474 258086 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1123 08:43:57.003483 258086 kubeadm.go:319]
I1123 08:43:57.003523 258086 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1123 08:43:57.003529 258086 kubeadm.go:319]
I1123 08:43:57.003586 258086 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1123 08:43:57.003674 258086 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1123 08:43:57.003774 258086 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1123 08:43:57.003795 258086 kubeadm.go:319]
I1123 08:43:57.003914 258086 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1123 08:43:57.004021 258086 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1123 08:43:57.004031 258086 kubeadm.go:319]
I1123 08:43:57.004153 258086 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token augmq1.wtvrtjusohbhz9fp \
I1123 08:43:57.004275 258086 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:5c48a3b11504a9c7a5d242d913eadf6a5354a8cb06c9ffcf8385d22efb04d8fa \
I1123 08:43:57.004298 258086 kubeadm.go:319] --control-plane
I1123 08:43:57.004302 258086 kubeadm.go:319]
I1123 08:43:57.004373 258086 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1123 08:43:57.004379 258086 kubeadm.go:319]
I1123 08:43:57.004452 258086 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token augmq1.wtvrtjusohbhz9fp \
I1123 08:43:57.004563 258086 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:5c48a3b11504a9c7a5d242d913eadf6a5354a8cb06c9ffcf8385d22efb04d8fa
I1123 08:43:57.004575 258086 cni.go:84] Creating CNI manager for ""
I1123 08:43:57.004581 258086 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:43:57.007194 258086 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1123 08:43:52.520061 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:52.520694 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:52.520747 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:52.520799 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:52.553943 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:52.553969 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:52.553975 206485 cri.go:89] found id: ""
I1123 08:43:52.553983 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:52.554042 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.559842 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.565197 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:52.565266 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:52.601499 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:52.601529 206485 cri.go:89] found id: ""
I1123 08:43:52.601568 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:52.601621 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.606848 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:52.606925 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:52.645028 206485 cri.go:89] found id: ""
I1123 08:43:52.645061 206485 logs.go:282] 0 containers: []
W1123 08:43:52.645072 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:52.645079 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:52.645139 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:52.681457 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:52.681484 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:52.681490 206485 cri.go:89] found id: ""
I1123 08:43:52.681499 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:52.681557 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.686548 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.690588 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:52.690682 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:52.723180 206485 cri.go:89] found id: ""
I1123 08:43:52.723208 206485 logs.go:282] 0 containers: []
W1123 08:43:52.723217 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:52.723224 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:52.723287 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:52.756887 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:52.756911 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:52.756921 206485 cri.go:89] found id: ""
I1123 08:43:52.756929 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:52.756985 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.761180 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.765188 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:52.765247 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:52.795290 206485 cri.go:89] found id: ""
I1123 08:43:52.795319 206485 logs.go:282] 0 containers: []
W1123 08:43:52.795329 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:52.795336 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:52.795395 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:52.822978 206485 cri.go:89] found id: ""
I1123 08:43:52.823006 206485 logs.go:282] 0 containers: []
W1123 08:43:52.823013 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:52.823022 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:52.823034 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:52.859205 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:52.859240 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:52.910295 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:52.910334 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:52.948004 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:52.948045 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:52.982700 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:52.982734 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:53.055592 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:53.055634 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:53.097286 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:53.097327 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:53.133102 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:53.133146 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:53.170688 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:53.170722 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:53.281419 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:53.281464 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:53.298748 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:53.298777 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:53.373016 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:53.373040 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:53.373054 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:55.914776 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:55.915250 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:55.915303 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:55.915351 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:55.943544 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:55.943567 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:55.943572 206485 cri.go:89] found id: ""
I1123 08:43:55.943579 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:55.943622 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:55.948391 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:55.952924 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:55.952992 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:55.981407 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:55.981431 206485 cri.go:89] found id: ""
I1123 08:43:55.981441 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:55.981501 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:55.986304 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:55.986378 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:56.014167 206485 cri.go:89] found id: ""
I1123 08:43:56.014192 206485 logs.go:282] 0 containers: []
W1123 08:43:56.014200 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:56.014206 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:56.014262 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:56.050121 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:56.050153 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:56.050160 206485 cri.go:89] found id: ""
I1123 08:43:56.050170 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:56.050236 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:56.055306 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:56.059507 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:56.059586 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:56.092810 206485 cri.go:89] found id: ""
I1123 08:43:56.092843 206485 logs.go:282] 0 containers: []
W1123 08:43:56.092856 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:56.092864 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:56.092931 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:56.126845 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:56.126869 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:56.126874 206485 cri.go:89] found id: ""
I1123 08:43:56.126884 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:56.126939 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:56.131943 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:56.135880 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:56.135945 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:56.163669 206485 cri.go:89] found id: ""
I1123 08:43:56.163696 206485 logs.go:282] 0 containers: []
W1123 08:43:56.163707 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:56.163714 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:56.163773 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:56.197602 206485 cri.go:89] found id: ""
I1123 08:43:56.197638 206485 logs.go:282] 0 containers: []
W1123 08:43:56.197660 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:56.197672 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:56.197689 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:56.238940 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:56.238981 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:56.288636 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:56.288691 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:56.324266 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:56.324299 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:56.378458 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:56.378498 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:56.417284 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:56.417313 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:56.509149 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:56.509182 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:56.523057 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:56.523082 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:56.583048 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:56.583074 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:56.583095 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:56.618320 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:56.618358 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:56.651682 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:56.651713 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:56.709657 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:56.709694 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:57.008714 258086 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1123 08:43:57.013402 258086 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1123 08:43:57.013443 258086 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1123 08:43:57.028881 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1123 08:43:57.253419 258086 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1123 08:43:57.253530 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:57.253599 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-999106 minikube.k8s.io/updated_at=2025_11_23T08_43_57_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=50c3a8a3c03e8a84b6c978a884d21c3de8c6d4f1 minikube.k8s.io/name=no-preload-999106 minikube.k8s.io/primary=true
I1123 08:43:57.264168 258086 ops.go:34] apiserver oom_adj: -16
I1123 08:43:57.330032 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
W1123 08:43:53.286319 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
W1123 08:43:55.786003 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
I1123 08:43:57.285411 254114 node_ready.go:49] node "old-k8s-version-204346" is "Ready"
I1123 08:43:57.285445 254114 node_ready.go:38] duration metric: took 14.503433565s for node "old-k8s-version-204346" to be "Ready" ...
I1123 08:43:57.285462 254114 api_server.go:52] waiting for apiserver process to appear ...
I1123 08:43:57.285564 254114 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:43:57.301686 254114 api_server.go:72] duration metric: took 14.973147695s to wait for apiserver process to appear ...
I1123 08:43:57.301718 254114 api_server.go:88] waiting for apiserver healthz status ...
I1123 08:43:57.301742 254114 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1123 08:43:57.306545 254114 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1123 08:43:57.308093 254114 api_server.go:141] control plane version: v1.28.0
I1123 08:43:57.308124 254114 api_server.go:131] duration metric: took 6.398178ms to wait for apiserver health ...
I1123 08:43:57.308135 254114 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 08:43:57.312486 254114 system_pods.go:59] 8 kube-system pods found
I1123 08:43:57.312519 254114 system_pods.go:61] "coredns-5dd5756b68-2fdsv" [1c71e052-b3c2-4875-8aeb-7d724ee26e06] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:43:57.312525 254114 system_pods.go:61] "etcd-old-k8s-version-204346" [58cc20a4-23f1-4a5a-ba0a-03fadfc6df09] Running
I1123 08:43:57.312530 254114 system_pods.go:61] "kindnet-q8xnm" [c3178adf-8eb3-4210-9674-fdda89d3317d] Running
I1123 08:43:57.312539 254114 system_pods.go:61] "kube-apiserver-old-k8s-version-204346" [e63e828c-37a0-48ab-9413-932b3cde09cc] Running
I1123 08:43:57.312542 254114 system_pods.go:61] "kube-controller-manager-old-k8s-version-204346" [bbaefdad-f8f3-4264-a467-5f75937de2a0] Running
I1123 08:43:57.312546 254114 system_pods.go:61] "kube-proxy-tzq9b" [5d122719-2577-438f-bae7-72a1034f88ef] Running
I1123 08:43:57.312548 254114 system_pods.go:61] "kube-scheduler-old-k8s-version-204346" [773bcc91-2553-4606-91ab-f32ec0ba3738] Running
I1123 08:43:57.312553 254114 system_pods.go:61] "storage-provisioner" [372382d8-d23f-4e6d-89ae-8f2c9c46b6dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:43:57.312559 254114 system_pods.go:74] duration metric: took 4.418082ms to wait for pod list to return data ...
I1123 08:43:57.312566 254114 default_sa.go:34] waiting for default service account to be created ...
I1123 08:43:57.315607 254114 default_sa.go:45] found service account: "default"
I1123 08:43:57.315634 254114 default_sa.go:55] duration metric: took 3.061615ms for default service account to be created ...
I1123 08:43:57.315674 254114 system_pods.go:116] waiting for k8s-apps to be running ...
I1123 08:43:57.320602 254114 system_pods.go:86] 8 kube-system pods found
I1123 08:43:57.320629 254114 system_pods.go:89] "coredns-5dd5756b68-2fdsv" [1c71e052-b3c2-4875-8aeb-7d724ee26e06] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:43:57.320634 254114 system_pods.go:89] "etcd-old-k8s-version-204346" [58cc20a4-23f1-4a5a-ba0a-03fadfc6df09] Running
I1123 08:43:57.320639 254114 system_pods.go:89] "kindnet-q8xnm" [c3178adf-8eb3-4210-9674-fdda89d3317d] Running
I1123 08:43:57.320657 254114 system_pods.go:89] "kube-apiserver-old-k8s-version-204346" [e63e828c-37a0-48ab-9413-932b3cde09cc] Running
I1123 08:43:57.320663 254114 system_pods.go:89] "kube-controller-manager-old-k8s-version-204346" [bbaefdad-f8f3-4264-a467-5f75937de2a0] Running
I1123 08:43:57.320668 254114 system_pods.go:89] "kube-proxy-tzq9b" [5d122719-2577-438f-bae7-72a1034f88ef] Running
I1123 08:43:57.320673 254114 system_pods.go:89] "kube-scheduler-old-k8s-version-204346" [773bcc91-2553-4606-91ab-f32ec0ba3738] Running
I1123 08:43:57.320679 254114 system_pods.go:89] "storage-provisioner" [372382d8-d23f-4e6d-89ae-8f2c9c46b6dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:43:57.320708 254114 retry.go:31] will retry after 281.398987ms: missing components: kube-dns
I1123 08:43:57.607881 254114 system_pods.go:86] 8 kube-system pods found
I1123 08:43:57.607919 254114 system_pods.go:89] "coredns-5dd5756b68-2fdsv" [1c71e052-b3c2-4875-8aeb-7d724ee26e06] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:43:57.607927 254114 system_pods.go:89] "etcd-old-k8s-version-204346" [58cc20a4-23f1-4a5a-ba0a-03fadfc6df09] Running
I1123 08:43:57.607936 254114 system_pods.go:89] "kindnet-q8xnm" [c3178adf-8eb3-4210-9674-fdda89d3317d] Running
I1123 08:43:57.607942 254114 system_pods.go:89] "kube-apiserver-old-k8s-version-204346" [e63e828c-37a0-48ab-9413-932b3cde09cc] Running
I1123 08:43:57.607948 254114 system_pods.go:89] "kube-controller-manager-old-k8s-version-204346" [bbaefdad-f8f3-4264-a467-5f75937de2a0] Running
I1123 08:43:57.607952 254114 system_pods.go:89] "kube-proxy-tzq9b" [5d122719-2577-438f-bae7-72a1034f88ef] Running
I1123 08:43:57.607957 254114 system_pods.go:89] "kube-scheduler-old-k8s-version-204346" [773bcc91-2553-4606-91ab-f32ec0ba3738] Running
I1123 08:43:57.607964 254114 system_pods.go:89] "storage-provisioner" [372382d8-d23f-4e6d-89ae-8f2c9c46b6dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:43:57.607991 254114 retry.go:31] will retry after 389.750642ms: missing components: kube-dns
I1123 08:43:58.002207 254114 system_pods.go:86] 8 kube-system pods found
I1123 08:43:58.002234 254114 system_pods.go:89] "coredns-5dd5756b68-2fdsv" [1c71e052-b3c2-4875-8aeb-7d724ee26e06] Running
I1123 08:43:58.002240 254114 system_pods.go:89] "etcd-old-k8s-version-204346" [58cc20a4-23f1-4a5a-ba0a-03fadfc6df09] Running
I1123 08:43:58.002249 254114 system_pods.go:89] "kindnet-q8xnm" [c3178adf-8eb3-4210-9674-fdda89d3317d] Running
I1123 08:43:58.002253 254114 system_pods.go:89] "kube-apiserver-old-k8s-version-204346" [e63e828c-37a0-48ab-9413-932b3cde09cc] Running
I1123 08:43:58.002257 254114 system_pods.go:89] "kube-controller-manager-old-k8s-version-204346" [bbaefdad-f8f3-4264-a467-5f75937de2a0] Running
I1123 08:43:58.002261 254114 system_pods.go:89] "kube-proxy-tzq9b" [5d122719-2577-438f-bae7-72a1034f88ef] Running
I1123 08:43:58.002264 254114 system_pods.go:89] "kube-scheduler-old-k8s-version-204346" [773bcc91-2553-4606-91ab-f32ec0ba3738] Running
I1123 08:43:58.002267 254114 system_pods.go:89] "storage-provisioner" [372382d8-d23f-4e6d-89ae-8f2c9c46b6dc] Running
I1123 08:43:58.002275 254114 system_pods.go:126] duration metric: took 686.59398ms to wait for k8s-apps to be running ...
I1123 08:43:58.002285 254114 system_svc.go:44] waiting for kubelet service to be running ....
I1123 08:43:58.002331 254114 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 08:43:58.016798 254114 system_svc.go:56] duration metric: took 14.504815ms WaitForService to wait for kubelet
I1123 08:43:58.016829 254114 kubeadm.go:587] duration metric: took 15.688298138s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:43:58.016854 254114 node_conditions.go:102] verifying NodePressure condition ...
I1123 08:43:58.021952 254114 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1123 08:43:58.021983 254114 node_conditions.go:123] node cpu capacity is 8
I1123 08:43:58.022010 254114 node_conditions.go:105] duration metric: took 5.146561ms to run NodePressure ...
I1123 08:43:58.022026 254114 start.go:242] waiting for startup goroutines ...
I1123 08:43:58.022040 254114 start.go:247] waiting for cluster config update ...
I1123 08:43:58.022056 254114 start.go:256] writing updated cluster config ...
I1123 08:43:58.022354 254114 ssh_runner.go:195] Run: rm -f paused
I1123 08:43:58.026482 254114 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:43:58.030783 254114 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-2fdsv" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.035326 254114 pod_ready.go:94] pod "coredns-5dd5756b68-2fdsv" is "Ready"
I1123 08:43:58.035351 254114 pod_ready.go:86] duration metric: took 4.542747ms for pod "coredns-5dd5756b68-2fdsv" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.038155 254114 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.042389 254114 pod_ready.go:94] pod "etcd-old-k8s-version-204346" is "Ready"
I1123 08:43:58.042413 254114 pod_ready.go:86] duration metric: took 4.236026ms for pod "etcd-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.045530 254114 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.049686 254114 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-204346" is "Ready"
I1123 08:43:58.049708 254114 pod_ready.go:86] duration metric: took 4.151976ms for pod "kube-apiserver-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.052167 254114 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.430619 254114 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-204346" is "Ready"
I1123 08:43:58.430662 254114 pod_ready.go:86] duration metric: took 378.478321ms for pod "kube-controller-manager-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.631434 254114 pod_ready.go:83] waiting for pod "kube-proxy-tzq9b" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:59.030458 254114 pod_ready.go:94] pod "kube-proxy-tzq9b" is "Ready"
I1123 08:43:59.030484 254114 pod_ready.go:86] duration metric: took 399.024693ms for pod "kube-proxy-tzq9b" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:59.231371 254114 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:59.630789 254114 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-204346" is "Ready"
I1123 08:43:59.630824 254114 pod_ready.go:86] duration metric: took 399.424476ms for pod "kube-scheduler-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:59.630840 254114 pod_ready.go:40] duration metric: took 1.604329749s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:43:59.682106 254114 start.go:625] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1123 08:43:59.683780 254114 out.go:203]
W1123 08:43:59.685129 254114 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1123 08:43:59.686407 254114 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1123 08:43:59.689781 254114 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-204346" cluster and "default" namespace by default
I1123 08:43:59.237742 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:59.238210 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:59.238271 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:59.238328 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:59.266168 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:59.266191 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:59.266197 206485 cri.go:89] found id: ""
I1123 08:43:59.266205 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:59.266261 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.270518 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.274380 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:59.274439 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:59.301514 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:59.301542 206485 cri.go:89] found id: ""
I1123 08:43:59.301552 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:59.301612 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.305940 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:59.306010 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:59.332361 206485 cri.go:89] found id: ""
I1123 08:43:59.332384 206485 logs.go:282] 0 containers: []
W1123 08:43:59.332394 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:59.332402 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:59.332453 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:59.360415 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:59.360515 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:59.360533 206485 cri.go:89] found id: ""
I1123 08:43:59.360541 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:59.360600 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.364967 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.369350 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:59.369411 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:59.400932 206485 cri.go:89] found id: ""
I1123 08:43:59.400960 206485 logs.go:282] 0 containers: []
W1123 08:43:59.400971 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:59.400979 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:59.401039 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:59.426988 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:59.427009 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:59.427013 206485 cri.go:89] found id: ""
I1123 08:43:59.427019 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:59.427065 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.431308 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.435139 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:59.435187 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:59.461062 206485 cri.go:89] found id: ""
I1123 08:43:59.461089 206485 logs.go:282] 0 containers: []
W1123 08:43:59.461098 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:59.461106 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:59.461156 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:59.487437 206485 cri.go:89] found id: ""
I1123 08:43:59.487458 206485 logs.go:282] 0 containers: []
W1123 08:43:59.487467 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:59.487476 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:59.487487 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:59.520087 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:59.520115 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:59.551620 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:59.551662 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:59.610836 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:59.610857 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:59.610875 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:59.647413 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:59.647458 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:59.686992 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:59.687024 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:59.724084 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:59.724115 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:59.760830 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:59.760916 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:59.811485 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:59.811519 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:59.920592 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:59.920624 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:59.937635 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:59.937681 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:59.974909 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:59.974948 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:57.830451 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:58.330875 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:58.830628 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:59.330282 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:59.830162 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:00.330422 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:00.830950 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:01.330805 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:01.830841 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:02.330880 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:02.414724 258086 kubeadm.go:1114] duration metric: took 5.161257652s to wait for elevateKubeSystemPrivileges
I1123 08:44:02.414756 258086 kubeadm.go:403] duration metric: took 15.737896165s to StartCluster
I1123 08:44:02.414776 258086 settings.go:142] acquiring lock: {Name:mk2c00a8b461754a49d5c7fd5af34c7d1005153a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:44:02.414842 258086 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21969-13876/kubeconfig
I1123 08:44:02.416821 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/kubeconfig: {Name:mk636046b7146fd65b5638a6d549b76e61f7f055 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:44:02.417741 258086 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 08:44:02.417762 258086 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1123 08:44:02.417786 258086 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1123 08:44:02.417889 258086 addons.go:70] Setting storage-provisioner=true in profile "no-preload-999106"
I1123 08:44:02.417910 258086 addons.go:239] Setting addon storage-provisioner=true in "no-preload-999106"
I1123 08:44:02.417926 258086 addons.go:70] Setting default-storageclass=true in profile "no-preload-999106"
I1123 08:44:02.417947 258086 config.go:182] Loaded profile config "no-preload-999106": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 08:44:02.417950 258086 host.go:66] Checking if "no-preload-999106" exists ...
I1123 08:44:02.417952 258086 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-999106"
I1123 08:44:02.418452 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:44:02.418590 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:44:02.419817 258086 out.go:179] * Verifying Kubernetes components...
I1123 08:44:02.422556 258086 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:44:02.448285 258086 addons.go:239] Setting addon default-storageclass=true in "no-preload-999106"
I1123 08:44:02.448336 258086 host.go:66] Checking if "no-preload-999106" exists ...
I1123 08:44:02.448496 258086 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:44:02.448879 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:44:02.449866 258086 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:44:02.449888 258086 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1123 08:44:02.449940 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:44:02.479849 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:44:02.481186 258086 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1123 08:44:02.481210 258086 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1123 08:44:02.481267 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:44:02.506758 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:44:02.518200 258086 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1123 08:44:02.581982 258086 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:44:02.612639 258086 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:44:02.629441 258086 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1123 08:44:02.722551 258086 start.go:977] {"host.minikube.internal": 192.168.85.1} host record injected into CoreDNS's ConfigMap
I1123 08:44:02.724186 258086 node_ready.go:35] waiting up to 6m0s for node "no-preload-999106" to be "Ready" ...
I1123 08:44:02.952603 258086 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1123 08:44:02.531044 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:44:02.531451 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:44:02.531515 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:44:02.531572 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:44:02.568683 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:44:02.568716 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:44:02.568723 206485 cri.go:89] found id: ""
I1123 08:44:02.568732 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:44:02.568799 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.573171 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.577424 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:44:02.577582 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:44:02.618894 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:44:02.618923 206485 cri.go:89] found id: ""
I1123 08:44:02.618932 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:44:02.618987 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.624397 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:44:02.624456 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:44:02.659100 206485 cri.go:89] found id: ""
I1123 08:44:02.659131 206485 logs.go:282] 0 containers: []
W1123 08:44:02.659143 206485 logs.go:284] No container was found matching "coredns"
I1123 08:44:02.659151 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:44:02.659213 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:44:02.694829 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:44:02.694848 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:44:02.694852 206485 cri.go:89] found id: ""
I1123 08:44:02.694859 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:44:02.694907 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.700604 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.705763 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:44:02.705843 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:44:02.741480 206485 cri.go:89] found id: ""
I1123 08:44:02.741510 206485 logs.go:282] 0 containers: []
W1123 08:44:02.741523 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:44:02.741529 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:44:02.741595 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:44:02.778417 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:44:02.778442 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:44:02.778448 206485 cri.go:89] found id: ""
I1123 08:44:02.778456 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:44:02.778518 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.784422 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.789717 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:44:02.789794 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:44:02.821165 206485 cri.go:89] found id: ""
I1123 08:44:02.821194 206485 logs.go:282] 0 containers: []
W1123 08:44:02.821205 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:44:02.821216 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:44:02.821271 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:44:02.852719 206485 cri.go:89] found id: ""
I1123 08:44:02.852745 206485 logs.go:282] 0 containers: []
W1123 08:44:02.852754 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:44:02.852766 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:44:02.852785 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:44:02.892590 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:44:02.892629 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:44:02.926138 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:44:02.926174 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:44:02.962943 206485 logs.go:123] Gathering logs for container status ...
I1123 08:44:02.962982 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:44:02.999133 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:44:02.999165 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:44:03.103866 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:44:03.103901 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:44:03.118230 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:44:03.118258 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:44:03.152826 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:44:03.152853 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:44:03.207774 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:44:03.207809 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:44:03.255093 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:44:03.255135 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:44:03.316127 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:44:03.316156 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:44:03.316171 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:44:03.350816 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:44:03.350855 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:44:05.885724 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:44:05.886146 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:44:05.886208 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:44:05.886271 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:44:05.912631 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:44:05.912667 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:44:05.912672 206485 cri.go:89] found id: ""
I1123 08:44:05.912681 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:44:05.912736 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:05.916915 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:05.920714 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:44:05.920785 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:44:05.948197 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:44:05.948226 206485 cri.go:89] found id: ""
I1123 08:44:05.948237 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:44:05.948297 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:05.952344 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:44:05.952394 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:44:05.979281 206485 cri.go:89] found id: ""
I1123 08:44:05.979302 206485 logs.go:282] 0 containers: []
W1123 08:44:05.979309 206485 logs.go:284] No container was found matching "coredns"
I1123 08:44:05.979315 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:44:05.979360 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:44:06.005748 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:44:06.005775 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:44:06.005781 206485 cri.go:89] found id: ""
I1123 08:44:06.005790 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:44:06.005842 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:06.009813 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:06.013567 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:44:06.013631 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:44:06.040041 206485 cri.go:89] found id: ""
I1123 08:44:06.040069 206485 logs.go:282] 0 containers: []
W1123 08:44:06.040082 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:44:06.040090 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:44:06.040146 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:44:06.068400 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:44:06.068423 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:44:06.068428 206485 cri.go:89] found id: ""
I1123 08:44:06.068435 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:44:06.068489 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:06.072472 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:06.076295 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:44:06.076354 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:44:06.102497 206485 cri.go:89] found id: ""
I1123 08:44:06.102525 206485 logs.go:282] 0 containers: []
W1123 08:44:06.102538 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:44:06.102546 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:44:06.102607 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:44:06.130104 206485 cri.go:89] found id: ""
I1123 08:44:06.130125 206485 logs.go:282] 0 containers: []
W1123 08:44:06.130132 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:44:06.130141 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:44:06.130150 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:44:06.219429 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:44:06.219465 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:44:06.278463 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:44:06.278491 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:44:06.278507 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:44:06.315308 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:44:06.315344 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:44:06.374595 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:44:06.374627 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:44:06.404338 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:44:06.404365 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:44:06.453101 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:44:06.453130 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:44:06.466457 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:44:06.466503 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:44:06.499235 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:44:06.499264 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:44:06.531782 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:44:06.531811 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:44:06.567190 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:44:06.567225 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:44:06.595596 206485 logs.go:123] Gathering logs for container status ...
I1123 08:44:06.595626 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:44:02.953927 258086 addons.go:530] duration metric: took 536.142427ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1123 08:44:03.227564 258086 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-999106" context rescaled to 1 replicas
W1123 08:44:04.727505 258086 node_ready.go:57] node "no-preload-999106" has "Ready":"False" status (will retry)
W1123 08:44:07.227319 258086 node_ready.go:57] node "no-preload-999106" has "Ready":"False" status (will retry)
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
1357388ae0aa5 56cc512116c8f 8 seconds ago Running busybox 0 34632f38cdf63 busybox default
80475d9bc2771 ead0a4a53df89 13 seconds ago Running coredns 0 cd75a3dc79d90 coredns-5dd5756b68-2fdsv kube-system
089b66b211cc0 6e38f40d628db 13 seconds ago Running storage-provisioner 0 8489f4374b9ca storage-provisioner kube-system
39b3d72b0119b 409467f978b4a 24 seconds ago Running kindnet-cni 0 4e7fe0b0a93a6 kindnet-q8xnm kube-system
ef4e4389e44ca ea1030da44aa1 27 seconds ago Running kube-proxy 0 5b9d69d308423 kube-proxy-tzq9b kube-system
0ef7f303a2ce3 f6f496300a2ae 46 seconds ago Running kube-scheduler 0 2757f6f1f2847 kube-scheduler-old-k8s-version-204346 kube-system
8f2985624466e 4be79c38a4bab 46 seconds ago Running kube-controller-manager 0 7d13da4692cf0 kube-controller-manager-old-k8s-version-204346 kube-system
328d012e2a9c6 bb5e0dde9054c 46 seconds ago Running kube-apiserver 0 801b406a053e0 kube-apiserver-old-k8s-version-204346 kube-system
09bd2ad51bcbe 73deb9a3f7025 46 seconds ago Running etcd 0 bd3a3ff71b569 etcd-old-k8s-version-204346 kube-system
==> containerd <==
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.554367695Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-5dd5756b68-2fdsv,Uid:1c71e052-b3c2-4875-8aeb-7d724ee26e06,Namespace:kube-system,Attempt:0,} returns sandbox id \"cd75a3dc79d9055a439d60e0b8c3a0eaf0c09774664074c042478ddbd42d8ed7\""
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.557881691Z" level=info msg="CreateContainer within sandbox \"cd75a3dc79d9055a439d60e0b8c3a0eaf0c09774664074c042478ddbd42d8ed7\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.565420837Z" level=info msg="Container 80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a: CDI devices from CRI Config.CDIDevices: []"
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.572367270Z" level=info msg="CreateContainer within sandbox \"cd75a3dc79d9055a439d60e0b8c3a0eaf0c09774664074c042478ddbd42d8ed7\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a\""
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.573105266Z" level=info msg="StartContainer for \"80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a\""
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.573985605Z" level=info msg="connecting to shim 80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a" address="unix:///run/containerd/s/402875f21b0b7b033dcd7b3cca8f2720835d3f90418b17dd5f3df52485b09e0c" protocol=ttrpc version=3
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.602588352Z" level=info msg="StartContainer for \"089b66b211cc086767c9fdf40aba06bcf7b4484c0976381a4bdf51afe2621f61\" returns successfully"
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.630751490Z" level=info msg="StartContainer for \"80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a\" returns successfully"
Nov 23 08:44:00 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:00.171495043Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:85a1fcd5-ee10-4749-9dec-40efed82eb3e,Namespace:default,Attempt:0,}"
Nov 23 08:44:00 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:00.210794452Z" level=info msg="connecting to shim 34632f38cdf63a655e8bb7d39dd15ba97b0a7a53c3d2190fc06701fde9c49996" address="unix:///run/containerd/s/9131634b5b9e099a09d55b33b67bba908aad637f11b87abf7ed2211b15f763a9" namespace=k8s.io protocol=ttrpc version=3
Nov 23 08:44:00 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:00.287286149Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:85a1fcd5-ee10-4749-9dec-40efed82eb3e,Namespace:default,Attempt:0,} returns sandbox id \"34632f38cdf63a655e8bb7d39dd15ba97b0a7a53c3d2190fc06701fde9c49996\""
Nov 23 08:44:00 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:00.289225870Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.394106458Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.394929355Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396643"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.396449964Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.399611876Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.400256412Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.110984688s"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.400309785Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.402701592Z" level=info msg="CreateContainer within sandbox \"34632f38cdf63a655e8bb7d39dd15ba97b0a7a53c3d2190fc06701fde9c49996\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.410744826Z" level=info msg="Container 1357388ae0aa594dabe5692b9f6c39afa871a26d6dd0b5809e1510839a986dd5: CDI devices from CRI Config.CDIDevices: []"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.419870192Z" level=info msg="CreateContainer within sandbox \"34632f38cdf63a655e8bb7d39dd15ba97b0a7a53c3d2190fc06701fde9c49996\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"1357388ae0aa594dabe5692b9f6c39afa871a26d6dd0b5809e1510839a986dd5\""
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.421053047Z" level=info msg="StartContainer for \"1357388ae0aa594dabe5692b9f6c39afa871a26d6dd0b5809e1510839a986dd5\""
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.422071051Z" level=info msg="connecting to shim 1357388ae0aa594dabe5692b9f6c39afa871a26d6dd0b5809e1510839a986dd5" address="unix:///run/containerd/s/9131634b5b9e099a09d55b33b67bba908aad637f11b87abf7ed2211b15f763a9" protocol=ttrpc version=3
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.495260690Z" level=info msg="StartContainer for \"1357388ae0aa594dabe5692b9f6c39afa871a26d6dd0b5809e1510839a986dd5\" returns successfully"
Nov 23 08:44:09 old-k8s-version-204346 containerd[661]: E1123 08:44:09.948064 661 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 25cf5af2951e282c4b0e961a02fb5d3e57c974501832fee92eec17b5135b9ec9d9e87d2ac94e6d117a5ed3dd54e8800aa7b4479706eb54497145ccdb80397d1b
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:38064 - 25011 "HINFO IN 3150570816276822377.3169321318277058455. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.024835318s
==> describe nodes <==
Name: old-k8s-version-204346
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-204346
kubernetes.io/os=linux
minikube.k8s.io/commit=50c3a8a3c03e8a84b6c978a884d21c3de8c6d4f1
minikube.k8s.io/name=old-k8s-version-204346
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_23T08_43_30_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 23 Nov 2025 08:43:26 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-204346
AcquireTime: <unset>
RenewTime: Sun, 23 Nov 2025 08:44:10 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sun, 23 Nov 2025 08:44:00 +0000 Sun, 23 Nov 2025 08:43:25 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sun, 23 Nov 2025 08:44:00 +0000 Sun, 23 Nov 2025 08:43:25 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sun, 23 Nov 2025 08:44:00 +0000 Sun, 23 Nov 2025 08:43:25 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sun, 23 Nov 2025 08:44:00 +0000 Sun, 23 Nov 2025 08:43:57 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.103.2
Hostname: old-k8s-version-204346
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863352Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863352Ki
pods: 110
System Info:
Machine ID: 9629f1d5bc1ed524a56ce23c69214c09
System UUID: ddf0e41b-1230-4041-b2b0-aca7ba0a6fe4
Boot ID: 3bab2277-1db4-4284-9fcc-5d1d58e87eb4
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12s
kube-system coredns-5dd5756b68-2fdsv 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 29s
kube-system etcd-old-k8s-version-204346 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 42s
kube-system kindnet-q8xnm 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 29s
kube-system kube-apiserver-old-k8s-version-204346 250m (3%) 0 (0%) 0 (0%) 0 (0%) 42s
kube-system kube-controller-manager-old-k8s-version-204346 200m (2%) 0 (0%) 0 (0%) 0 (0%) 42s
kube-system kube-proxy-tzq9b 0 (0%) 0 (0%) 0 (0%) 0 (0%) 29s
kube-system kube-scheduler-old-k8s-version-204346 100m (1%) 0 (0%) 0 (0%) 0 (0%) 42s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 28s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (10%) 100m (1%)
memory 220Mi (0%) 220Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 27s kube-proxy
Normal Starting 47s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 47s (x8 over 47s) kubelet Node old-k8s-version-204346 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 47s (x8 over 47s) kubelet Node old-k8s-version-204346 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 47s (x7 over 47s) kubelet Node old-k8s-version-204346 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 47s kubelet Updated Node Allocatable limit across pods
Normal Starting 42s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 42s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 42s kubelet Node old-k8s-version-204346 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 42s kubelet Node old-k8s-version-204346 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 42s kubelet Node old-k8s-version-204346 status is now: NodeHasSufficientPID
Normal RegisteredNode 29s node-controller Node old-k8s-version-204346 event: Registered Node old-k8s-version-204346 in Controller
Normal NodeReady 14s kubelet Node old-k8s-version-204346 status is now: NodeReady
==> dmesg <==
[Nov23 07:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.000998] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001000] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.084011] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.395963] i8042: Warning: Keylock active
[ +0.012075] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.497035] block sda: the capability attribute has been deprecated.
[ +0.088048] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.022581] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +5.308229] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [09bd2ad51bcbe3133715a0348c39fbd488688f92fdc757fef7b242366c6eb72b] <==
{"level":"info","ts":"2025-11-23T08:43:25.072307Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 switched to configuration voters=(17451554867067011209)"}
{"level":"info","ts":"2025-11-23T08:43:25.072449Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"3336683c081d149d","local-member-id":"f23060b075c4c089","added-peer-id":"f23060b075c4c089","added-peer-peer-urls":["https://192.168.103.2:2380"]}
{"level":"info","ts":"2025-11-23T08:43:25.073769Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-23T08:43:25.074175Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"f23060b075c4c089","initial-advertise-peer-urls":["https://192.168.103.2:2380"],"listen-peer-urls":["https://192.168.103.2:2380"],"advertise-client-urls":["https://192.168.103.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.103.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-23T08:43:25.073803Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.103.2:2380"}
{"level":"info","ts":"2025-11-23T08:43:25.074517Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.103.2:2380"}
{"level":"info","ts":"2025-11-23T08:43:25.074362Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-23T08:43:25.459144Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-23T08:43:25.459188Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-23T08:43:25.459233Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 received MsgPreVoteResp from f23060b075c4c089 at term 1"}
{"level":"info","ts":"2025-11-23T08:43:25.459253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 became candidate at term 2"}
{"level":"info","ts":"2025-11-23T08:43:25.459261Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 received MsgVoteResp from f23060b075c4c089 at term 2"}
{"level":"info","ts":"2025-11-23T08:43:25.459281Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 became leader at term 2"}
{"level":"info","ts":"2025-11-23T08:43:25.459298Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: f23060b075c4c089 elected leader f23060b075c4c089 at term 2"}
{"level":"info","ts":"2025-11-23T08:43:25.460336Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"f23060b075c4c089","local-member-attributes":"{Name:old-k8s-version-204346 ClientURLs:[https://192.168.103.2:2379]}","request-path":"/0/members/f23060b075c4c089/attributes","cluster-id":"3336683c081d149d","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-23T08:43:25.460368Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-23T08:43:25.460352Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:43:25.460547Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-23T08:43:25.46207Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-23T08:43:25.460343Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-23T08:43:25.46151Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"3336683c081d149d","local-member-id":"f23060b075c4c089","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:43:25.462309Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:43:25.462347Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:43:25.461945Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-23T08:43:25.466791Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.103.2:2379"}
==> kernel <==
08:44:11 up 1:26, 0 user, load average: 2.68, 2.53, 1.78
Linux old-k8s-version-204346 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [39b3d72b0119bcc6ecd6e57b170ea19f5592bba7f48f0436c996349c8ca348dd] <==
I1123 08:43:46.866967 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1123 08:43:46.867287 1 main.go:139] hostIP = 192.168.103.2
podIP = 192.168.103.2
I1123 08:43:46.867434 1 main.go:148] setting mtu 1500 for CNI
I1123 08:43:46.867454 1 main.go:178] kindnetd IP family: "ipv4"
I1123 08:43:46.867482 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-23T08:43:47Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1123 08:43:47.067711 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1123 08:43:47.067748 1 controller.go:381] "Waiting for informer caches to sync"
I1123 08:43:47.067760 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1123 08:43:47.067904 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1123 08:43:47.369355 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1123 08:43:47.369384 1 metrics.go:72] Registering metrics
I1123 08:43:47.369441 1 controller.go:711] "Syncing nftables rules"
I1123 08:43:57.076844 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1123 08:43:57.076915 1 main.go:301] handling current node
I1123 08:44:07.068039 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1123 08:44:07.068093 1 main.go:301] handling current node
==> kube-apiserver [328d012e2a9c60b89bce2737c3bcb6c1f31581c21f2a3f2969cf002ad66bc982] <==
I1123 08:43:26.887380 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1123 08:43:26.887389 1 cache.go:39] Caches are synced for autoregister controller
I1123 08:43:26.887641 1 apf_controller.go:377] Running API Priority and Fairness config worker
I1123 08:43:26.887685 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1123 08:43:26.887980 1 shared_informer.go:318] Caches are synced for configmaps
I1123 08:43:26.888304 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
E1123 08:43:26.889201 1 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I1123 08:43:26.889373 1 controller.go:624] quota admission added evaluator for: namespaces
I1123 08:43:26.893730 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1123 08:43:27.092344 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1123 08:43:27.794220 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1123 08:43:27.798285 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1123 08:43:27.798301 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1123 08:43:28.278123 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1123 08:43:28.347605 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1123 08:43:28.396516 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1123 08:43:28.402119 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.103.2]
I1123 08:43:28.403251 1 controller.go:624] quota admission added evaluator for: endpoints
I1123 08:43:28.410689 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1123 08:43:28.846011 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1123 08:43:29.796332 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1123 08:43:29.808173 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1123 08:43:29.820075 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1123 08:43:42.454084 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1123 08:43:42.555727 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [8f2985624466e7aea2ab0922f065c597c0bfd5950e9a7d9af9278d532ea162aa] <==
I1123 08:43:42.301940 1 shared_informer.go:318] Caches are synced for resource quota
I1123 08:43:42.313117 1 shared_informer.go:318] Caches are synced for endpoint
I1123 08:43:42.320707 1 shared_informer.go:318] Caches are synced for resource quota
I1123 08:43:42.468731 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-tzq9b"
I1123 08:43:42.470032 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-q8xnm"
I1123 08:43:42.562465 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1123 08:43:42.637391 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 08:43:42.693556 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 08:43:42.693596 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1123 08:43:42.710317 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-j49bt"
I1123 08:43:42.720116 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-2fdsv"
I1123 08:43:42.729591 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="167.450584ms"
I1123 08:43:42.750029 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="20.070236ms"
I1123 08:43:42.772635 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="22.530968ms"
I1123 08:43:42.772808 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="87.9µs"
I1123 08:43:42.817260 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1123 08:43:42.828181 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-j49bt"
I1123 08:43:42.834660 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="17.534321ms"
I1123 08:43:42.847353 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="12.631926ms"
I1123 08:43:42.847627 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="198.148µs"
I1123 08:43:57.121773 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="186.5µs"
I1123 08:43:57.150540 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="96.97µs"
I1123 08:43:57.197693 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1123 08:43:57.981361 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="7.07769ms"
I1123 08:43:57.981507 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="71.031µs"
==> kube-proxy [ef4e4389e44ca59002bc45aac4774894eff14408a6f6654c403f41a7f5ae9178] <==
I1123 08:43:43.138692 1 server_others.go:69] "Using iptables proxy"
I1123 08:43:43.148849 1 node.go:141] Successfully retrieved node IP: 192.168.103.2
I1123 08:43:43.173806 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1123 08:43:43.177107 1 server_others.go:152] "Using iptables Proxier"
I1123 08:43:43.177190 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1123 08:43:43.177209 1 server_others.go:438] "Defaulting to no-op detect-local"
I1123 08:43:43.177247 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1123 08:43:43.177554 1 server.go:846] "Version info" version="v1.28.0"
I1123 08:43:43.177673 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 08:43:43.178478 1 config.go:188] "Starting service config controller"
I1123 08:43:43.178510 1 shared_informer.go:311] Waiting for caches to sync for service config
I1123 08:43:43.179694 1 config.go:97] "Starting endpoint slice config controller"
I1123 08:43:43.179818 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1123 08:43:43.180065 1 config.go:315] "Starting node config controller"
I1123 08:43:43.180084 1 shared_informer.go:311] Waiting for caches to sync for node config
I1123 08:43:43.280364 1 shared_informer.go:318] Caches are synced for service config
I1123 08:43:43.280485 1 shared_informer.go:318] Caches are synced for node config
I1123 08:43:43.280575 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [0ef7f303a2ce364a193b1c3a534acf3ce3197306c4c2cc9dd0d5717ae9adf953] <==
W1123 08:43:26.854417 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1123 08:43:26.854437 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1123 08:43:26.854443 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1123 08:43:26.854473 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1123 08:43:26.854661 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1123 08:43:26.854686 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1123 08:43:26.854994 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1123 08:43:26.855027 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1123 08:43:27.681328 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1123 08:43:27.681369 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1123 08:43:27.807379 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1123 08:43:27.807413 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1123 08:43:27.818838 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1123 08:43:27.818882 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1123 08:43:27.819991 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1123 08:43:27.820027 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1123 08:43:27.871687 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1123 08:43:27.871733 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1123 08:43:27.919852 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1123 08:43:27.919895 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1123 08:43:28.036804 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1123 08:43:28.036839 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1123 08:43:28.055978 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1123 08:43:28.056016 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
I1123 08:43:29.649311 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.141354 1529 kuberuntime_manager.go:1463] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.142046 1529 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.476770 1529 topology_manager.go:215] "Topology Admit Handler" podUID="5d122719-2577-438f-bae7-72a1034f88ef" podNamespace="kube-system" podName="kube-proxy-tzq9b"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.478900 1529 topology_manager.go:215] "Topology Admit Handler" podUID="c3178adf-8eb3-4210-9674-fdda89d3317d" podNamespace="kube-system" podName="kindnet-q8xnm"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651490 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksdwp\" (UniqueName: \"kubernetes.io/projected/5d122719-2577-438f-bae7-72a1034f88ef-kube-api-access-ksdwp\") pod \"kube-proxy-tzq9b\" (UID: \"5d122719-2577-438f-bae7-72a1034f88ef\") " pod="kube-system/kube-proxy-tzq9b"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651698 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c3178adf-8eb3-4210-9674-fdda89d3317d-lib-modules\") pod \"kindnet-q8xnm\" (UID: \"c3178adf-8eb3-4210-9674-fdda89d3317d\") " pod="kube-system/kindnet-q8xnm"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651862 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5d122719-2577-438f-bae7-72a1034f88ef-lib-modules\") pod \"kube-proxy-tzq9b\" (UID: \"5d122719-2577-438f-bae7-72a1034f88ef\") " pod="kube-system/kube-proxy-tzq9b"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651898 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/c3178adf-8eb3-4210-9674-fdda89d3317d-cni-cfg\") pod \"kindnet-q8xnm\" (UID: \"c3178adf-8eb3-4210-9674-fdda89d3317d\") " pod="kube-system/kindnet-q8xnm"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651928 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/c3178adf-8eb3-4210-9674-fdda89d3317d-xtables-lock\") pod \"kindnet-q8xnm\" (UID: \"c3178adf-8eb3-4210-9674-fdda89d3317d\") " pod="kube-system/kindnet-q8xnm"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651960 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9ntt\" (UniqueName: \"kubernetes.io/projected/c3178adf-8eb3-4210-9674-fdda89d3317d-kube-api-access-m9ntt\") pod \"kindnet-q8xnm\" (UID: \"c3178adf-8eb3-4210-9674-fdda89d3317d\") " pod="kube-system/kindnet-q8xnm"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651992 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/5d122719-2577-438f-bae7-72a1034f88ef-kube-proxy\") pod \"kube-proxy-tzq9b\" (UID: \"5d122719-2577-438f-bae7-72a1034f88ef\") " pod="kube-system/kube-proxy-tzq9b"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.652021 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/5d122719-2577-438f-bae7-72a1034f88ef-xtables-lock\") pod \"kube-proxy-tzq9b\" (UID: \"5d122719-2577-438f-bae7-72a1034f88ef\") " pod="kube-system/kube-proxy-tzq9b"
Nov 23 08:43:46 old-k8s-version-204346 kubelet[1529]: I1123 08:43:46.940830 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-tzq9b" podStartSLOduration=4.940768474 podCreationTimestamp="2025-11-23 08:43:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:43:43.932316562 +0000 UTC m=+14.168739010" watchObservedRunningTime="2025-11-23 08:43:46.940768474 +0000 UTC m=+17.177190922"
Nov 23 08:43:46 old-k8s-version-204346 kubelet[1529]: I1123 08:43:46.940988 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-q8xnm" podStartSLOduration=1.718157541 podCreationTimestamp="2025-11-23 08:43:42 +0000 UTC" firstStartedPulling="2025-11-23 08:43:43.30687244 +0000 UTC m=+13.543294877" lastFinishedPulling="2025-11-23 08:43:46.52967151 +0000 UTC m=+16.766093948" observedRunningTime="2025-11-23 08:43:46.940594815 +0000 UTC m=+17.177017264" watchObservedRunningTime="2025-11-23 08:43:46.940956612 +0000 UTC m=+17.177379059"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.093693 1529 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.122486 1529 topology_manager.go:215] "Topology Admit Handler" podUID="1c71e052-b3c2-4875-8aeb-7d724ee26e06" podNamespace="kube-system" podName="coredns-5dd5756b68-2fdsv"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.122759 1529 topology_manager.go:215] "Topology Admit Handler" podUID="372382d8-d23f-4e6d-89ae-8f2c9c46b6dc" podNamespace="kube-system" podName="storage-provisioner"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.263400 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c71e052-b3c2-4875-8aeb-7d724ee26e06-config-volume\") pod \"coredns-5dd5756b68-2fdsv\" (UID: \"1c71e052-b3c2-4875-8aeb-7d724ee26e06\") " pod="kube-system/coredns-5dd5756b68-2fdsv"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.263464 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-474bl\" (UniqueName: \"kubernetes.io/projected/1c71e052-b3c2-4875-8aeb-7d724ee26e06-kube-api-access-474bl\") pod \"coredns-5dd5756b68-2fdsv\" (UID: \"1c71e052-b3c2-4875-8aeb-7d724ee26e06\") " pod="kube-system/coredns-5dd5756b68-2fdsv"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.263575 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/372382d8-d23f-4e6d-89ae-8f2c9c46b6dc-tmp\") pod \"storage-provisioner\" (UID: \"372382d8-d23f-4e6d-89ae-8f2c9c46b6dc\") " pod="kube-system/storage-provisioner"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.263625 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cbg7\" (UniqueName: \"kubernetes.io/projected/372382d8-d23f-4e6d-89ae-8f2c9c46b6dc-kube-api-access-2cbg7\") pod \"storage-provisioner\" (UID: \"372382d8-d23f-4e6d-89ae-8f2c9c46b6dc\") " pod="kube-system/storage-provisioner"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.963727 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=14.963673229 podCreationTimestamp="2025-11-23 08:43:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:43:57.963551537 +0000 UTC m=+28.199973987" watchObservedRunningTime="2025-11-23 08:43:57.963673229 +0000 UTC m=+28.200095677"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.974383 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-2fdsv" podStartSLOduration=15.974330092 podCreationTimestamp="2025-11-23 08:43:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:43:57.974110377 +0000 UTC m=+28.210532825" watchObservedRunningTime="2025-11-23 08:43:57.974330092 +0000 UTC m=+28.210752539"
Nov 23 08:43:59 old-k8s-version-204346 kubelet[1529]: I1123 08:43:59.862724 1529 topology_manager.go:215] "Topology Admit Handler" podUID="85a1fcd5-ee10-4749-9dec-40efed82eb3e" podNamespace="default" podName="busybox"
Nov 23 08:43:59 old-k8s-version-204346 kubelet[1529]: I1123 08:43:59.981400 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdg6d\" (UniqueName: \"kubernetes.io/projected/85a1fcd5-ee10-4749-9dec-40efed82eb3e-kube-api-access-tdg6d\") pod \"busybox\" (UID: \"85a1fcd5-ee10-4749-9dec-40efed82eb3e\") " pod="default/busybox"
==> storage-provisioner [089b66b211cc086767c9fdf40aba06bcf7b4484c0976381a4bdf51afe2621f61] <==
I1123 08:43:57.613751 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1123 08:43:57.624633 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1123 08:43:57.624700 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1123 08:43:57.633950 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1123 08:43:57.634082 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"a0771e73-2533-4e9a-bd83-ee78487b1f50", APIVersion:"v1", ResourceVersion:"432", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-204346_bff6cf86-fcf0-4fe3-b85e-b85b2509b23f became leader
I1123 08:43:57.634291 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-204346_bff6cf86-fcf0-4fe3-b85e-b85b2509b23f!
I1123 08:43:57.734684 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-204346_bff6cf86-fcf0-4fe3-b85e-b85b2509b23f!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-204346 -n old-k8s-version-204346
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-204346 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect old-k8s-version-204346
helpers_test.go:243: (dbg) docker inspect old-k8s-version-204346:
-- stdout --
[
{
"Id": "74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716",
"Created": "2025-11-23T08:43:13.914336238Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 255015,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-11-23T08:43:13.954859222Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:133ca4ac39008d0056ad45d8cb70521d6b70d6e1b8bbff4678fd4b354efbdf70",
"ResolvConfPath": "/var/lib/docker/containers/74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716/hostname",
"HostsPath": "/var/lib/docker/containers/74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716/hosts",
"LogPath": "/var/lib/docker/containers/74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716/74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716-json.log",
"Name": "/old-k8s-version-204346",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"old-k8s-version-204346:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "old-k8s-version-204346",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": null,
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "74b9ec6867739b46c46d250281e773e2e1e6e55633355a3143f6c35242c78716",
"LowerDir": "/var/lib/docker/overlay2/c1a2c09b9684904e47b03e9569e26d403b09f5d541f2cb59b94c6e639ed9b4e3-init/diff:/var/lib/docker/overlay2/ee04ca8b85d0dedeb02bd9a5189a59a7f53ca89a011d262a78df32fa43bf0598/diff",
"MergedDir": "/var/lib/docker/overlay2/c1a2c09b9684904e47b03e9569e26d403b09f5d541f2cb59b94c6e639ed9b4e3/merged",
"UpperDir": "/var/lib/docker/overlay2/c1a2c09b9684904e47b03e9569e26d403b09f5d541f2cb59b94c6e639ed9b4e3/diff",
"WorkDir": "/var/lib/docker/overlay2/c1a2c09b9684904e47b03e9569e26d403b09f5d541f2cb59b94c6e639ed9b4e3/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "old-k8s-version-204346",
"Source": "/var/lib/docker/volumes/old-k8s-version-204346/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "old-k8s-version-204346",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "old-k8s-version-204346",
"name.minikube.sigs.k8s.io": "old-k8s-version-204346",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"SandboxID": "db03bea2ae002bb3595102e41f0b3c5dd373e7f121cbf490c03f867ac8b10fc2",
"SandboxKey": "/var/run/docker/netns/db03bea2ae00",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33058"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33059"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33062"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33060"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33061"
}
]
},
"Networks": {
"old-k8s-version-204346": {
"IPAMConfig": {
"IPv4Address": "192.168.103.2",
"IPv6Address": ""
},
"Links": null,
"Aliases": null,
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "2c3268f545c0648cec3972c75676102d767b9cbd699aea51b301ba1de04cad51",
"EndpointID": "a6fed4b2c7bb6c663b8e774c8e64911b07fef263695c45641973d777a7144fb2",
"Gateway": "192.168.103.1",
"IPAddress": "192.168.103.2",
"MacAddress": "1a:83:9b:a0:7e:0e",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"old-k8s-version-204346",
"74b9ec686773"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-204346 -n old-k8s-version-204346
helpers_test.go:252: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/old-k8s-version/serial/DeployApp]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-204346 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p old-k8s-version-204346 logs -n 25: (1.075446276s)
helpers_test.go:260: TestStartStop/group/old-k8s-version/serial/DeployApp logs:
-- stdout --
==> Audit <==
┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬───────────
──────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼───────────
──────────┤
│ start │ -p force-systemd-flag-570956 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ force-systemd-flag-570956 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ start │ -p NoKubernetes-846693 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=containerd │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ ssh │ -p NoKubernetes-846693 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ │
│ ssh │ force-systemd-env-352249 ssh cat /etc/containerd/config.toml │ force-systemd-env-352249 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ delete │ -p force-systemd-env-352249 │ force-systemd-env-352249 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ start │ -p cert-expiration-680868 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=containerd │ cert-expiration-680868 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:42 UTC │
│ ssh │ force-systemd-flag-570956 ssh cat /etc/containerd/config.toml │ force-systemd-flag-570956 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ delete │ -p force-systemd-flag-570956 │ force-systemd-flag-570956 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ start │ -p cert-options-194967 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=containerd │ cert-options-194967 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:42 UTC │
│ stop │ -p NoKubernetes-846693 │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:41 UTC │
│ start │ -p NoKubernetes-846693 --driver=docker --container-runtime=containerd │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:41 UTC │ 23 Nov 25 08:42 UTC │
│ ssh │ -p NoKubernetes-846693 sudo systemctl is-active --quiet service kubelet │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ │
│ delete │ -p NoKubernetes-846693 │ NoKubernetes-846693 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ start │ -p missing-upgrade-231159 --memory=3072 --driver=docker --container-runtime=containerd │ missing-upgrade-231159 │ jenkins │ v1.32.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ ssh │ cert-options-194967 ssh openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt │ cert-options-194967 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ ssh │ -p cert-options-194967 -- sudo cat /etc/kubernetes/admin.conf │ cert-options-194967 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ delete │ -p cert-options-194967 │ cert-options-194967 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ start │ -p stopped-upgrade-595653 --memory=3072 --vm-driver=docker --container-runtime=containerd │ stopped-upgrade-595653 │ jenkins │ v1.32.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ start │ -p missing-upgrade-231159 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ missing-upgrade-231159 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:43 UTC │
│ stop │ stopped-upgrade-595653 stop │ stopped-upgrade-595653 │ jenkins │ v1.32.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:42 UTC │
│ start │ -p stopped-upgrade-595653 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=containerd │ stopped-upgrade-595653 │ jenkins │ v1.37.0 │ 23 Nov 25 08:42 UTC │ 23 Nov 25 08:43 UTC │
│ delete │ -p stopped-upgrade-595653 │ stopped-upgrade-595653 │ jenkins │ v1.37.0 │ 23 Nov 25 08:43 UTC │ 23 Nov 25 08:43 UTC │
│ start │ -p old-k8s-version-204346 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.28.0 │ old-k8s-version-204346 │ jenkins │ v1.37.0 │ 23 Nov 25 08:43 UTC │ 23 Nov 25 08:43 UTC │
│ delete │ -p missing-upgrade-231159 │ missing-upgrade-231159 │ jenkins │ v1.37.0 │ 23 Nov 25 08:43 UTC │ 23 Nov 25 08:43 UTC │
│ start │ -p no-preload-999106 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=containerd --kubernetes-version=v1.34.1 │ no-preload-999106 │ jenkins │ v1.37.0 │ 23 Nov 25 08:43 UTC │ │
└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴───────────
──────────┘
==> Last Start <==
Log file created at: 2025/11/23 08:43:27
Running on machine: ubuntu-20-agent-8
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1123 08:43:27.495640 258086 out.go:360] Setting OutFile to fd 1 ...
I1123 08:43:27.495743 258086 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:43:27.495751 258086 out.go:374] Setting ErrFile to fd 2...
I1123 08:43:27.495755 258086 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:43:27.495953 258086 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21969-13876/.minikube/bin
I1123 08:43:27.496394 258086 out.go:368] Setting JSON to false
I1123 08:43:27.497504 258086 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-8","uptime":5148,"bootTime":1763882259,"procs":304,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1123 08:43:27.497559 258086 start.go:143] virtualization: kvm guest
I1123 08:43:27.499449 258086 out.go:179] * [no-preload-999106] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1123 08:43:27.500767 258086 notify.go:221] Checking for updates...
I1123 08:43:27.500781 258086 out.go:179] - MINIKUBE_LOCATION=21969
I1123 08:43:27.502005 258086 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1123 08:43:27.503191 258086 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21969-13876/kubeconfig
I1123 08:43:27.504274 258086 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21969-13876/.minikube
I1123 08:43:27.505281 258086 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1123 08:43:27.506287 258086 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1123 08:43:27.507765 258086 config.go:182] Loaded profile config "cert-expiration-680868": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 08:43:27.507859 258086 config.go:182] Loaded profile config "kubernetes-upgrade-776670": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 08:43:27.507939 258086 config.go:182] Loaded profile config "old-k8s-version-204346": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1123 08:43:27.508012 258086 driver.go:422] Setting default libvirt URI to qemu:///system
I1123 08:43:27.532390 258086 docker.go:124] docker version: linux-29.0.2:Docker Engine - Community
I1123 08:43:27.532462 258086 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 08:43:27.588863 258086 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:67 OomKillDisable:false NGoroutines:76 SystemTime:2025-11-23 08:43:27.578321532 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652072448 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-8 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[m
ap[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1123 08:43:27.588959 258086 docker.go:319] overlay module found
I1123 08:43:27.590837 258086 out.go:179] * Using the docker driver based on user configuration
I1123 08:43:27.592139 258086 start.go:309] selected driver: docker
I1123 08:43:27.592164 258086 start.go:927] validating driver "docker" against <nil>
I1123 08:43:27.592175 258086 start.go:938] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1123 08:43:27.592773 258086 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1123 08:43:27.653421 258086 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:3 ContainersRunning:3 ContainersPaused:0 ContainersStopped:0 Images:4 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:64 OomKillDisable:false NGoroutines:76 SystemTime:2025-11-23 08:43:27.643267927 +0000 UTC LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-1044-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652072448 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-8 Labels:[] ExperimentalBuild:false ServerVersion:29.0.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fcd43222d6b07379a4be9786bda52438f0dd16a1 Expected:} RuncCommit:{ID:v1.3.3-0-gd842d771 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[m
ap[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.30.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.40.3] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner Vendor:Docker Inc. Version:v1.0.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I1123 08:43:27.653668 258086 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1123 08:43:27.653954 258086 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:43:27.655624 258086 out.go:179] * Using Docker driver with root privileges
I1123 08:43:27.656995 258086 cni.go:84] Creating CNI manager for ""
I1123 08:43:27.657071 258086 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:43:27.657084 258086 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I1123 08:43:27.657159 258086 start.go:353] cluster config:
{Name:no-preload-999106 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-999106 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local Containe
rRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock:
SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:43:27.658480 258086 out.go:179] * Starting "no-preload-999106" primary control-plane node in "no-preload-999106" cluster
I1123 08:43:27.659678 258086 cache.go:134] Beginning downloading kic base image for docker with containerd
I1123 08:43:27.660749 258086 out.go:179] * Pulling base image v0.0.48-1763789673-21948 ...
I1123 08:43:27.661680 258086 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1123 08:43:27.661748 258086 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon
I1123 08:43:27.661771 258086 profile.go:143] Saving config to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/config.json ...
I1123 08:43:27.661801 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/config.json: {Name:mk1854d74e572dba5e78564093e1183622e9aa74 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:27.661927 258086 cache.go:107] acquiring lock: {Name:mka7418a84f8d9aaa890eb7bcafd158f0f845949 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.661970 258086 cache.go:107] acquiring lock: {Name:mke646091201bbef396ff67d16f0cce49990b355 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.661948 258086 cache.go:107] acquiring lock: {Name:mk929bb8e7363fd9f8d602565b078a816979b3d2 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.661979 258086 cache.go:107] acquiring lock: {Name:mk667c169463661b7e999b395cc2d348440d0d0f Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.662058 258086 cache.go:115] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 exists
I1123 08:43:27.662070 258086 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:27.662087 258086 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:27.662069 258086 cache.go:107] acquiring lock: {Name:mk4a8ffda79c57b59d9ec0be62cf6989cc0b3dc8 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.662104 258086 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:27.662089 258086 cache.go:107] acquiring lock: {Name:mkce85e18a9851767cd13073008b6382df083ea3 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.662080 258086 cache.go:107] acquiring lock: {Name:mk495076811ea27b7ee848ef73ebf58029c788de Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.662200 258086 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:27.662257 258086 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1123 08:43:27.662073 258086 cache.go:96] cache image "gcr.io/k8s-minikube/storage-provisioner:v5" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5" took 113.368µs
I1123 08:43:27.662298 258086 cache.go:80] save to tar file gcr.io/k8s-minikube/storage-provisioner:v5 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 succeeded
I1123 08:43:27.662298 258086 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:27.662338 258086 cache.go:107] acquiring lock: {Name:mkc513b15aec17d5c3e77aa2e6131827198f8c26 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.662430 258086 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1123 08:43:27.663312 258086 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:27.663446 258086 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1123 08:43:27.663495 258086 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:27.663529 258086 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:27.663560 258086 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1123 08:43:27.663553 258086 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:27.663602 258086 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:27.683115 258086 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f in local docker daemon, skipping pull
I1123 08:43:27.683133 258086 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f exists in daemon, skipping load
I1123 08:43:27.683151 258086 cache.go:243] Successfully downloaded all kic artifacts
I1123 08:43:27.683188 258086 start.go:360] acquireMachinesLock for no-preload-999106: {Name:mk535dea2e363deaa61ac9c5041ac2d499c9efc5 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:43:27.683286 258086 start.go:364] duration metric: took 77.877µs to acquireMachinesLock for "no-preload-999106"
I1123 08:43:27.683314 258086 start.go:93] Provisioning new machine with config: &{Name:no-preload-999106 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-999106 Namespace:default APIServerHAVIP: APIServer
Name:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false Cust
omQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 08:43:27.683378 258086 start.go:125] createHost starting for "" (driver="docker")
I1123 08:43:23.886201 254114 out.go:252] - Booting up control plane ...
I1123 08:43:23.886286 254114 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1123 08:43:23.886377 254114 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1123 08:43:23.886992 254114 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1123 08:43:23.903197 254114 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1123 08:43:23.904138 254114 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1123 08:43:23.904196 254114 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1123 08:43:24.010365 254114 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I1123 08:43:28.512514 254114 kubeadm.go:319] [apiclient] All control plane components are healthy after 4.502224 seconds
I1123 08:43:28.512707 254114 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1123 08:43:28.525209 254114 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1123 08:43:29.051871 254114 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1123 08:43:29.052189 254114 kubeadm.go:319] [mark-control-plane] Marking the node old-k8s-version-204346 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1123 08:43:29.563746 254114 kubeadm.go:319] [bootstrap-token] Using token: kv40xr.vpl4w4wq1fqvcjbv
I1123 08:43:29.565119 254114 out.go:252] - Configuring RBAC rules ...
I1123 08:43:29.565274 254114 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1123 08:43:29.570668 254114 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1123 08:43:29.578425 254114 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1123 08:43:29.581516 254114 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1123 08:43:29.584593 254114 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1123 08:43:29.588395 254114 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1123 08:43:29.599565 254114 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1123 08:43:29.809875 254114 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1123 08:43:29.974613 254114 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1123 08:43:29.975627 254114 kubeadm.go:319]
I1123 08:43:29.975755 254114 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1123 08:43:29.975777 254114 kubeadm.go:319]
I1123 08:43:29.975879 254114 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1123 08:43:29.975889 254114 kubeadm.go:319]
I1123 08:43:29.975929 254114 kubeadm.go:319] mkdir -p $HOME/.kube
I1123 08:43:29.976013 254114 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1123 08:43:29.976095 254114 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1123 08:43:29.976109 254114 kubeadm.go:319]
I1123 08:43:29.976189 254114 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1123 08:43:29.976197 254114 kubeadm.go:319]
I1123 08:43:29.976265 254114 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1123 08:43:29.976274 254114 kubeadm.go:319]
I1123 08:43:29.976365 254114 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1123 08:43:29.976483 254114 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1123 08:43:29.976577 254114 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1123 08:43:29.976584 254114 kubeadm.go:319]
I1123 08:43:29.976725 254114 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1123 08:43:29.976849 254114 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1123 08:43:29.976864 254114 kubeadm.go:319]
I1123 08:43:29.976980 254114 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token kv40xr.vpl4w4wq1fqvcjbv \
I1123 08:43:29.977124 254114 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:5c48a3b11504a9c7a5d242d913eadf6a5354a8cb06c9ffcf8385d22efb04d8fa \
I1123 08:43:29.977157 254114 kubeadm.go:319] --control-plane
I1123 08:43:29.977166 254114 kubeadm.go:319]
I1123 08:43:29.977310 254114 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1123 08:43:29.977319 254114 kubeadm.go:319]
I1123 08:43:29.977452 254114 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token kv40xr.vpl4w4wq1fqvcjbv \
I1123 08:43:29.977614 254114 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:5c48a3b11504a9c7a5d242d913eadf6a5354a8cb06c9ffcf8385d22efb04d8fa
I1123 08:43:29.980159 254114 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1123 08:43:29.980378 254114 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1123 08:43:29.980409 254114 cni.go:84] Creating CNI manager for ""
I1123 08:43:29.980425 254114 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:43:29.984213 254114 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1123 08:43:27.685925 258086 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I1123 08:43:27.686123 258086 start.go:159] libmachine.API.Create for "no-preload-999106" (driver="docker")
I1123 08:43:27.686177 258086 client.go:173] LocalClient.Create starting
I1123 08:43:27.686233 258086 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem
I1123 08:43:27.686260 258086 main.go:143] libmachine: Decoding PEM data...
I1123 08:43:27.686276 258086 main.go:143] libmachine: Parsing certificate...
I1123 08:43:27.686316 258086 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21969-13876/.minikube/certs/cert.pem
I1123 08:43:27.686334 258086 main.go:143] libmachine: Decoding PEM data...
I1123 08:43:27.686346 258086 main.go:143] libmachine: Parsing certificate...
I1123 08:43:27.686738 258086 cli_runner.go:164] Run: docker network inspect no-preload-999106 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1123 08:43:27.705175 258086 cli_runner.go:211] docker network inspect no-preload-999106 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1123 08:43:27.705249 258086 network_create.go:284] running [docker network inspect no-preload-999106] to gather additional debugging logs...
I1123 08:43:27.705267 258086 cli_runner.go:164] Run: docker network inspect no-preload-999106
W1123 08:43:27.723756 258086 cli_runner.go:211] docker network inspect no-preload-999106 returned with exit code 1
I1123 08:43:27.723782 258086 network_create.go:287] error running [docker network inspect no-preload-999106]: docker network inspect no-preload-999106: exit status 1
stdout:
[]
stderr:
Error response from daemon: network no-preload-999106 not found
I1123 08:43:27.723796 258086 network_create.go:289] output of [docker network inspect no-preload-999106]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network no-preload-999106 not found
** /stderr **
I1123 08:43:27.723894 258086 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1123 08:43:27.742266 258086 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-5d8b9fdde185 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:76:1f:2b:8a:58:68} reservation:<nil>}
I1123 08:43:27.742817 258086 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-103255eb2e92 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:3a:bb:33:85:24:bc} reservation:<nil>}
I1123 08:43:27.743314 258086 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-fa9f597fddc6 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:b6:bb:01:5e:01:61} reservation:<nil>}
I1123 08:43:27.743832 258086 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-da43b5ed9d8a IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:8e:fe:29:08:73:55} reservation:<nil>}
I1123 08:43:27.744448 258086 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001c01e10}
I1123 08:43:27.744470 258086 network_create.go:124] attempt to create docker network no-preload-999106 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I1123 08:43:27.744518 258086 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=no-preload-999106 no-preload-999106
I1123 08:43:27.793693 258086 network_create.go:108] docker network no-preload-999106 192.168.85.0/24 created
I1123 08:43:27.793726 258086 kic.go:121] calculated static IP "192.168.85.2" for the "no-preload-999106" container
I1123 08:43:27.793798 258086 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1123 08:43:27.815508 258086 cli_runner.go:164] Run: docker volume create no-preload-999106 --label name.minikube.sigs.k8s.io=no-preload-999106 --label created_by.minikube.sigs.k8s.io=true
I1123 08:43:27.836788 258086 oci.go:103] Successfully created a docker volume no-preload-999106
I1123 08:43:27.836929 258086 cli_runner.go:164] Run: docker run --rm --name no-preload-999106-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-999106 --entrypoint /usr/bin/test -v no-preload-999106:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f -d /var/lib
I1123 08:43:27.851417 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1123 08:43:27.858908 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1123 08:43:27.860347 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1123 08:43:27.863442 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1123 08:43:27.865314 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1123 08:43:27.878248 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1123 08:43:27.889986 258086 cache.go:162] opening: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1123 08:43:27.973948 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 exists
I1123 08:43:27.973981 258086 cache.go:96] cache image "registry.k8s.io/pause:3.10.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1" took 311.645455ms
I1123 08:43:27.973999 258086 cache.go:80] save to tar file registry.k8s.io/pause:3.10.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 succeeded
I1123 08:43:28.304822 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 exists
I1123 08:43:28.304856 258086 cache.go:96] cache image "registry.k8s.io/kube-proxy:v1.34.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1" took 642.854298ms
I1123 08:43:28.304870 258086 cache.go:80] save to tar file registry.k8s.io/kube-proxy:v1.34.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 succeeded
I1123 08:43:28.332384 258086 oci.go:107] Successfully prepared a docker volume no-preload-999106
I1123 08:43:28.332436 258086 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
W1123 08:43:28.332544 258086 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W1123 08:43:28.332582 258086 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I1123 08:43:28.332628 258086 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1123 08:43:28.401507 258086 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname no-preload-999106 --name no-preload-999106 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=no-preload-999106 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=no-preload-999106 --network no-preload-999106 --ip 192.168.85.2 --volume no-preload-999106:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f
I1123 08:43:28.713710 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Running}}
I1123 08:43:28.734068 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:43:28.754748 258086 cli_runner.go:164] Run: docker exec no-preload-999106 stat /var/lib/dpkg/alternatives/iptables
I1123 08:43:28.804354 258086 oci.go:144] the created container "no-preload-999106" has a running status.
I1123 08:43:28.804388 258086 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa...
I1123 08:43:28.861878 258086 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1123 08:43:28.899755 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:43:28.921384 258086 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1123 08:43:28.921408 258086 kic_runner.go:114] Args: [docker exec --privileged no-preload-999106 chown docker:docker /home/docker/.ssh/authorized_keys]
I1123 08:43:28.971140 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:43:28.992543 258086 machine.go:94] provisionDockerMachine start ...
I1123 08:43:28.992659 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:29.017873 258086 main.go:143] libmachine: Using SSH client type: native
I1123 08:43:29.018228 258086 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1123 08:43:29.018252 258086 main.go:143] libmachine: About to run SSH command:
hostname
I1123 08:43:29.019229 258086 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:57704->127.0.0.1:33063: read: connection reset by peer
I1123 08:43:29.339938 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 exists
I1123 08:43:29.339967 258086 cache.go:96] cache image "registry.k8s.io/coredns/coredns:v1.12.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1" took 1.677878189s
I1123 08:43:29.339993 258086 cache.go:80] save to tar file registry.k8s.io/coredns/coredns:v1.12.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 succeeded
I1123 08:43:29.349964 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 exists
I1123 08:43:29.349997 258086 cache.go:96] cache image "registry.k8s.io/kube-scheduler:v1.34.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1" took 1.688022096s
I1123 08:43:29.350017 258086 cache.go:80] save to tar file registry.k8s.io/kube-scheduler:v1.34.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 succeeded
I1123 08:43:29.423577 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 exists
I1123 08:43:29.423607 258086 cache.go:96] cache image "registry.k8s.io/kube-controller-manager:v1.34.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1" took 1.761664135s
I1123 08:43:29.423620 258086 cache.go:80] save to tar file registry.k8s.io/kube-controller-manager:v1.34.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 succeeded
I1123 08:43:29.487535 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 exists
I1123 08:43:29.487565 258086 cache.go:96] cache image "registry.k8s.io/kube-apiserver:v1.34.1" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1" took 1.825655813s
I1123 08:43:29.487576 258086 cache.go:80] save to tar file registry.k8s.io/kube-apiserver:v1.34.1 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 succeeded
I1123 08:43:29.829693 258086 cache.go:157] /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 exists
I1123 08:43:29.829727 258086 cache.go:96] cache image "registry.k8s.io/etcd:3.6.4-0" -> "/home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0" took 2.16770936s
I1123 08:43:29.829741 258086 cache.go:80] save to tar file registry.k8s.io/etcd:3.6.4-0 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 succeeded
I1123 08:43:29.829763 258086 cache.go:87] Successfully saved all images to host disk.
I1123 08:43:32.164591 258086 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-999106
I1123 08:43:32.164618 258086 ubuntu.go:182] provisioning hostname "no-preload-999106"
I1123 08:43:32.164701 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:32.183134 258086 main.go:143] libmachine: Using SSH client type: native
I1123 08:43:32.183339 258086 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1123 08:43:32.183352 258086 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-999106 && echo "no-preload-999106" | sudo tee /etc/hostname
I1123 08:43:32.340889 258086 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-999106
I1123 08:43:32.340971 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:32.359419 258086 main.go:143] libmachine: Using SSH client type: native
I1123 08:43:32.359677 258086 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 127.0.0.1 33063 <nil> <nil>}
I1123 08:43:32.359696 258086 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-999106' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-999106/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-999106' | sudo tee -a /etc/hosts;
fi
fi
I1123 08:43:29.985991 254114 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1123 08:43:29.990966 254114 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.0/kubectl ...
I1123 08:43:29.990985 254114 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1123 08:43:30.005005 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1123 08:43:30.649440 254114 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1123 08:43:30.649546 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:30.649581 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes old-k8s-version-204346 minikube.k8s.io/updated_at=2025_11_23T08_43_30_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=50c3a8a3c03e8a84b6c978a884d21c3de8c6d4f1 minikube.k8s.io/name=old-k8s-version-204346 minikube.k8s.io/primary=true
I1123 08:43:30.659700 254114 ops.go:34] apiserver oom_adj: -16
I1123 08:43:30.729410 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:31.230340 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:31.730113 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:32.230535 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:32.729772 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:32.505327 258086 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:43:32.505361 258086 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21969-13876/.minikube CaCertPath:/home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21969-13876/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21969-13876/.minikube}
I1123 08:43:32.505408 258086 ubuntu.go:190] setting up certificates
I1123 08:43:32.505430 258086 provision.go:84] configureAuth start
I1123 08:43:32.505484 258086 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-999106
I1123 08:43:32.523951 258086 provision.go:143] copyHostCerts
I1123 08:43:32.524019 258086 exec_runner.go:144] found /home/jenkins/minikube-integration/21969-13876/.minikube/key.pem, removing ...
I1123 08:43:32.524033 258086 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21969-13876/.minikube/key.pem
I1123 08:43:32.524115 258086 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21969-13876/.minikube/key.pem (1675 bytes)
I1123 08:43:32.524235 258086 exec_runner.go:144] found /home/jenkins/minikube-integration/21969-13876/.minikube/ca.pem, removing ...
I1123 08:43:32.524248 258086 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21969-13876/.minikube/ca.pem
I1123 08:43:32.524289 258086 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21969-13876/.minikube/ca.pem (1078 bytes)
I1123 08:43:32.524373 258086 exec_runner.go:144] found /home/jenkins/minikube-integration/21969-13876/.minikube/cert.pem, removing ...
I1123 08:43:32.524383 258086 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21969-13876/.minikube/cert.pem
I1123 08:43:32.524416 258086 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21969-13876/.minikube/cert.pem (1123 bytes)
I1123 08:43:32.524499 258086 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21969-13876/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca-key.pem org=jenkins.no-preload-999106 san=[127.0.0.1 192.168.85.2 localhost minikube no-preload-999106]
I1123 08:43:32.587554 258086 provision.go:177] copyRemoteCerts
I1123 08:43:32.587609 258086 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 08:43:32.587655 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:32.605984 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:43:32.708249 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1123 08:43:32.727969 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1123 08:43:32.747752 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I1123 08:43:32.766001 258086 provision.go:87] duration metric: took 260.555897ms to configureAuth
I1123 08:43:32.766029 258086 ubuntu.go:206] setting minikube options for container-runtime
I1123 08:43:32.766187 258086 config.go:182] Loaded profile config "no-preload-999106": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 08:43:32.766198 258086 machine.go:97] duration metric: took 3.773633247s to provisionDockerMachine
I1123 08:43:32.766204 258086 client.go:176] duration metric: took 5.080019183s to LocalClient.Create
I1123 08:43:32.766223 258086 start.go:167] duration metric: took 5.080101552s to libmachine.API.Create "no-preload-999106"
I1123 08:43:32.766232 258086 start.go:293] postStartSetup for "no-preload-999106" (driver="docker")
I1123 08:43:32.766242 258086 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 08:43:32.766283 258086 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 08:43:32.766317 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:32.785085 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:43:32.889673 258086 ssh_runner.go:195] Run: cat /etc/os-release
I1123 08:43:32.893433 258086 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1123 08:43:32.893459 258086 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I1123 08:43:32.893470 258086 filesync.go:126] Scanning /home/jenkins/minikube-integration/21969-13876/.minikube/addons for local assets ...
I1123 08:43:32.893520 258086 filesync.go:126] Scanning /home/jenkins/minikube-integration/21969-13876/.minikube/files for local assets ...
I1123 08:43:32.893624 258086 filesync.go:149] local asset: /home/jenkins/minikube-integration/21969-13876/.minikube/files/etc/ssl/certs/174422.pem -> 174422.pem in /etc/ssl/certs
I1123 08:43:32.893761 258086 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 08:43:32.902075 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/files/etc/ssl/certs/174422.pem --> /etc/ssl/certs/174422.pem (1708 bytes)
I1123 08:43:32.921898 258086 start.go:296] duration metric: took 155.652278ms for postStartSetup
I1123 08:43:32.922243 258086 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-999106
I1123 08:43:32.940711 258086 profile.go:143] Saving config to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/config.json ...
I1123 08:43:32.940999 258086 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1123 08:43:32.941041 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:32.959311 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:43:33.058968 258086 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1123 08:43:33.063670 258086 start.go:128] duration metric: took 5.380278318s to createHost
I1123 08:43:33.063696 258086 start.go:83] releasing machines lock for "no-preload-999106", held for 5.380396187s
I1123 08:43:33.063776 258086 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" no-preload-999106
I1123 08:43:33.082497 258086 ssh_runner.go:195] Run: cat /version.json
I1123 08:43:33.082555 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:33.082576 258086 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 08:43:33.082676 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:43:33.101516 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:43:33.101929 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:43:33.258150 258086 ssh_runner.go:195] Run: systemctl --version
I1123 08:43:33.265003 258086 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 08:43:33.270133 258086 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 08:43:33.270202 258086 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 08:43:33.301093 258086 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1123 08:43:33.301114 258086 start.go:496] detecting cgroup driver to use...
I1123 08:43:33.301140 258086 detect.go:190] detected "systemd" cgroup driver on host os
I1123 08:43:33.301187 258086 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 08:43:33.316380 258086 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:43:33.328339 258086 docker.go:218] disabling cri-docker service (if available) ...
I1123 08:43:33.328388 258086 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1123 08:43:33.344573 258086 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1123 08:43:33.362321 258086 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1123 08:43:33.449438 258086 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1123 08:43:33.532610 258086 docker.go:234] disabling docker service ...
I1123 08:43:33.532689 258086 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1123 08:43:33.551827 258086 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1123 08:43:33.564985 258086 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1123 08:43:33.650121 258086 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1123 08:43:33.736173 258086 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 08:43:33.749245 258086 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:43:33.764351 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1123 08:43:33.774567 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 08:43:33.784258 258086 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I1123 08:43:33.784327 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1123 08:43:33.794411 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:43:33.804033 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 08:43:33.812857 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:43:33.821787 258086 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 08:43:33.829930 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 08:43:33.839002 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 08:43:33.847926 258086 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1123 08:43:33.856822 258086 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 08:43:33.864542 258086 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 08:43:33.871885 258086 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:43:33.950854 258086 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1123 08:43:34.024458 258086 start.go:543] Will wait 60s for socket path /run/containerd/containerd.sock
I1123 08:43:34.024534 258086 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I1123 08:43:34.029083 258086 start.go:564] Will wait 60s for crictl version
I1123 08:43:34.029145 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.032799 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I1123 08:43:34.057987 258086 start.go:580] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: v2.1.5
RuntimeApiVersion: v1
I1123 08:43:34.058049 258086 ssh_runner.go:195] Run: containerd --version
I1123 08:43:34.078381 258086 ssh_runner.go:195] Run: containerd --version
I1123 08:43:34.100680 258086 out.go:179] * Preparing Kubernetes v1.34.1 on containerd 2.1.5 ...
I1123 08:43:36.163341 206485 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (10.069407293s)
W1123 08:43:36.163379 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Unable to connect to the server: net/http: TLS handshake timeout
output:
** stderr **
Unable to connect to the server: net/http: TLS handshake timeout
** /stderr **
I1123 08:43:36.163391 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:36.163401 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:36.196694 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:36.196725 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:36.230996 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:36.231018 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:36.266205 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:36.266235 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:34.101669 258086 cli_runner.go:164] Run: docker network inspect no-preload-999106 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1123 08:43:34.119192 258086 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I1123 08:43:34.123375 258086 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:43:34.134033 258086 kubeadm.go:884] updating cluster {Name:no-preload-999106 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-999106 Namespace:default APIServerHAVIP: APIServerName:minikubeCA API
ServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuF
irmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1123 08:43:34.134129 258086 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime containerd
I1123 08:43:34.134170 258086 ssh_runner.go:195] Run: sudo crictl images --output json
I1123 08:43:34.159373 258086 containerd.go:623] couldn't find preloaded image for "registry.k8s.io/kube-apiserver:v1.34.1". assuming images are not preloaded.
I1123 08:43:34.159392 258086 cache_images.go:90] LoadCachedImages start: [registry.k8s.io/kube-apiserver:v1.34.1 registry.k8s.io/kube-controller-manager:v1.34.1 registry.k8s.io/kube-scheduler:v1.34.1 registry.k8s.io/kube-proxy:v1.34.1 registry.k8s.io/pause:3.10.1 registry.k8s.io/etcd:3.6.4-0 registry.k8s.io/coredns/coredns:v1.12.1 gcr.io/k8s-minikube/storage-provisioner:v5]
I1123 08:43:34.159438 258086 image.go:138] retrieving image: gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:34.159452 258086 image.go:138] retrieving image: registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.159485 258086 image.go:138] retrieving image: registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.159504 258086 image.go:138] retrieving image: registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.159534 258086 image.go:138] retrieving image: registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.159485 258086 image.go:138] retrieving image: registry.k8s.io/pause:3.10.1
I1123 08:43:34.159583 258086 image.go:138] retrieving image: registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.159658 258086 image.go:138] retrieving image: registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.161000 258086 image.go:181] daemon lookup for registry.k8s.io/etcd:3.6.4-0: Error response from daemon: No such image: registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.161332 258086 image.go:181] daemon lookup for registry.k8s.io/kube-scheduler:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.161540 258086 image.go:181] daemon lookup for gcr.io/k8s-minikube/storage-provisioner:v5: Error response from daemon: No such image: gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:34.161951 258086 image.go:181] daemon lookup for registry.k8s.io/kube-proxy:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.162137 258086 image.go:181] daemon lookup for registry.k8s.io/coredns/coredns:v1.12.1: Error response from daemon: No such image: registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.162179 258086 image.go:181] daemon lookup for registry.k8s.io/kube-controller-manager:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.162238 258086 image.go:181] daemon lookup for registry.k8s.io/pause:3.10.1: Error response from daemon: No such image: registry.k8s.io/pause:3.10.1
I1123 08:43:34.162370 258086 image.go:181] daemon lookup for registry.k8s.io/kube-apiserver:v1.34.1: Error response from daemon: No such image: registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.303423 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/coredns/coredns:v1.12.1" and sha "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969"
I1123 08:43:34.303507 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.304294 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/etcd:3.6.4-0" and sha "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115"
I1123 08:43:34.304346 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.325396 258086 cache_images.go:118] "registry.k8s.io/etcd:3.6.4-0" needs transfer: "registry.k8s.io/etcd:3.6.4-0" does not exist at hash "5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115" in container runtime
I1123 08:43:34.325443 258086 cri.go:218] Removing image: registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.325489 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.325396 258086 cache_images.go:118] "registry.k8s.io/coredns/coredns:v1.12.1" needs transfer: "registry.k8s.io/coredns/coredns:v1.12.1" does not exist at hash "52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969" in container runtime
I1123 08:43:34.325524 258086 cri.go:218] Removing image: registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.325560 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.329408 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.330479 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.332092 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/pause:3.10.1" and sha "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f"
I1123 08:43:34.332130 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/pause:3.10.1
I1123 08:43:34.334793 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-proxy:v1.34.1" and sha "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7"
I1123 08:43:34.334839 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.334892 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-scheduler:v1.34.1" and sha "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813"
I1123 08:43:34.334947 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.359405 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.359448 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.359453 258086 cache_images.go:118] "registry.k8s.io/pause:3.10.1" needs transfer: "registry.k8s.io/pause:3.10.1" does not exist at hash "cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f" in container runtime
I1123 08:43:34.359480 258086 cri.go:218] Removing image: registry.k8s.io/pause:3.10.1
I1123 08:43:34.359511 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.359927 258086 cache_images.go:118] "registry.k8s.io/kube-proxy:v1.34.1" needs transfer: "registry.k8s.io/kube-proxy:v1.34.1" does not exist at hash "fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7" in container runtime
I1123 08:43:34.359953 258086 cri.go:218] Removing image: registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.359986 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.362071 258086 cache_images.go:118] "registry.k8s.io/kube-scheduler:v1.34.1" needs transfer: "registry.k8s.io/kube-scheduler:v1.34.1" does not exist at hash "7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813" in container runtime
I1123 08:43:34.362107 258086 cri.go:218] Removing image: registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.362148 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.386773 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/coredns/coredns:v1.12.1
I1123 08:43:34.388038 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/etcd:3.6.4-0
I1123 08:43:34.388124 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1123 08:43:34.388148 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.388227 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.402862 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-controller-manager:v1.34.1" and sha "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f"
I1123 08:43:34.402936 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.406588 258086 containerd.go:267] Checking existence of image with name "registry.k8s.io/kube-apiserver:v1.34.1" and sha "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97"
I1123 08:43:34.406683 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.419900 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1
I1123 08:43:34.420019 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1
I1123 08:43:34.422632 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0
I1123 08:43:34.422820 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1123 08:43:34.422852 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0
I1123 08:43:34.422867 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.422905 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.432625 258086 cache_images.go:118] "registry.k8s.io/kube-controller-manager:v1.34.1" needs transfer: "registry.k8s.io/kube-controller-manager:v1.34.1" does not exist at hash "c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f" in container runtime
I1123 08:43:34.432698 258086 cri.go:218] Removing image: registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.432750 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.435170 258086 cache_images.go:118] "registry.k8s.io/kube-apiserver:v1.34.1" needs transfer: "registry.k8s.io/kube-apiserver:v1.34.1" does not exist at hash "c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97" in container runtime
I1123 08:43:34.435213 258086 cri.go:218] Removing image: registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.435236 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/coredns_v1.12.1: stat -c "%s %y" /var/lib/minikube/images/coredns_v1.12.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/coredns_v1.12.1': No such file or directory
I1123 08:43:34.435258 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:34.435263 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 --> /var/lib/minikube/images/coredns_v1.12.1 (22394368 bytes)
I1123 08:43:34.468602 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/etcd_3.6.4-0: stat -c "%s %y" /var/lib/minikube/images/etcd_3.6.4-0: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/etcd_3.6.4-0': No such file or directory
I1123 08:43:34.468621 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/pause:3.10.1
I1123 08:43:34.468654 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 --> /var/lib/minikube/images/etcd_3.6.4-0 (74320896 bytes)
I1123 08:43:34.468703 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-scheduler:v1.34.1
I1123 08:43:34.468726 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-proxy:v1.34.1
I1123 08:43:34.468757 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.468795 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.563471 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1
I1123 08:43:34.563530 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.563577 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1
I1123 08:43:34.563667 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1
I1123 08:43:34.563682 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.563581 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1
I1123 08:43:34.563706 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1
I1123 08:43:34.563755 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1
I1123 08:43:34.626877 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-scheduler_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-scheduler_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-scheduler_v1.34.1': No such file or directory
I1123 08:43:34.626895 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/pause_3.10.1: stat -c "%s %y" /var/lib/minikube/images/pause_3.10.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/pause_3.10.1': No such file or directory
I1123 08:43:34.626913 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 --> /var/lib/minikube/images/kube-scheduler_v1.34.1 (17396736 bytes)
I1123 08:43:34.626923 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 --> /var/lib/minikube/images/pause_3.10.1 (321024 bytes)
I1123 08:43:34.626927 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-proxy_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-proxy_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-proxy_v1.34.1': No such file or directory
I1123 08:43:34.626943 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 --> /var/lib/minikube/images/kube-proxy_v1.34.1 (25966080 bytes)
I1123 08:43:34.626974 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-controller-manager:v1.34.1
I1123 08:43:34.627042 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi registry.k8s.io/kube-apiserver:v1.34.1
I1123 08:43:34.685224 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1
I1123 08:43:34.685246 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1
I1123 08:43:34.685326 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1
I1123 08:43:34.685340 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1123 08:43:34.700613 258086 containerd.go:285] Loading image: /var/lib/minikube/images/pause_3.10.1
I1123 08:43:34.700688 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/pause_3.10.1
I1123 08:43:34.713376 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-apiserver_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-apiserver_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-apiserver_v1.34.1': No such file or directory
I1123 08:43:34.713409 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/kube-controller-manager_v1.34.1: stat -c "%s %y" /var/lib/minikube/images/kube-controller-manager_v1.34.1: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/kube-controller-manager_v1.34.1': No such file or directory
I1123 08:43:34.713407 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 --> /var/lib/minikube/images/kube-apiserver_v1.34.1 (27073024 bytes)
I1123 08:43:34.713434 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 --> /var/lib/minikube/images/kube-controller-manager_v1.34.1 (22831104 bytes)
I1123 08:43:34.840943 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/pause_3.10.1 from cache
I1123 08:43:34.885583 258086 containerd.go:285] Loading image: /var/lib/minikube/images/coredns_v1.12.1
I1123 08:43:34.885674 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1
I1123 08:43:35.489785 258086 containerd.go:267] Checking existence of image with name "gcr.io/k8s-minikube/storage-provisioner:v5" and sha "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562"
I1123 08:43:35.489853 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images ls name==gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:36.097868 258086 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/coredns_v1.12.1: (1.212165923s)
I1123 08:43:36.097898 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/coredns/coredns_v1.12.1 from cache
I1123 08:43:36.097915 258086 cache_images.go:118] "gcr.io/k8s-minikube/storage-provisioner:v5" needs transfer: "gcr.io/k8s-minikube/storage-provisioner:v5" does not exist at hash "6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562" in container runtime
I1123 08:43:36.097931 258086 containerd.go:285] Loading image: /var/lib/minikube/images/kube-scheduler_v1.34.1
I1123 08:43:36.097957 258086 cri.go:218] Removing image: gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:36.097992 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1
I1123 08:43:36.098005 258086 ssh_runner.go:195] Run: which crictl
I1123 08:43:37.105043 258086 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-scheduler_v1.34.1: (1.007027025s)
I1123 08:43:37.105070 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-scheduler_v1.34.1 from cache
I1123 08:43:37.105098 258086 ssh_runner.go:235] Completed: which crictl: (1.007074313s)
I1123 08:43:37.105104 258086 containerd.go:285] Loading image: /var/lib/minikube/images/kube-proxy_v1.34.1
I1123 08:43:37.105153 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:37.105159 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1
I1123 08:43:37.133915 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:33.230087 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:33.729573 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:34.229556 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:34.729739 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:35.229458 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:35.729622 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:36.229768 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:36.730508 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:37.229765 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:37.729788 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:38.229952 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:38.730333 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:39.229833 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:39.729862 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:40.229901 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:40.729885 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:41.230479 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:41.730515 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:42.230247 254114 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:42.326336 254114 kubeadm.go:1114] duration metric: took 11.676850942s to wait for elevateKubeSystemPrivileges
I1123 08:43:42.326376 254114 kubeadm.go:403] duration metric: took 21.509472133s to StartCluster
I1123 08:43:42.326398 254114 settings.go:142] acquiring lock: {Name:mk2c00a8b461754a49d5c7fd5af34c7d1005153a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:42.326470 254114 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21969-13876/kubeconfig
I1123 08:43:42.328223 254114 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/kubeconfig: {Name:mk636046b7146fd65b5638a6d549b76e61f7f055 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:42.328482 254114 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1123 08:43:42.328500 254114 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.103.2 Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 08:43:42.328566 254114 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1123 08:43:42.328729 254114 addons.go:70] Setting storage-provisioner=true in profile "old-k8s-version-204346"
I1123 08:43:42.328754 254114 addons.go:239] Setting addon storage-provisioner=true in "old-k8s-version-204346"
I1123 08:43:42.328778 254114 config.go:182] Loaded profile config "old-k8s-version-204346": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.0
I1123 08:43:42.328793 254114 host.go:66] Checking if "old-k8s-version-204346" exists ...
I1123 08:43:42.328837 254114 addons.go:70] Setting default-storageclass=true in profile "old-k8s-version-204346"
I1123 08:43:42.328856 254114 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "old-k8s-version-204346"
I1123 08:43:42.329183 254114 cli_runner.go:164] Run: docker container inspect old-k8s-version-204346 --format={{.State.Status}}
I1123 08:43:42.329321 254114 cli_runner.go:164] Run: docker container inspect old-k8s-version-204346 --format={{.State.Status}}
I1123 08:43:42.331021 254114 out.go:179] * Verifying Kubernetes components...
I1123 08:43:42.332482 254114 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:43:42.357866 254114 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:38.827550 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:38.827977 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:38.828023 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:38.828070 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:38.854573 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:38.854598 206485 cri.go:89] found id: "89f5abdf45afb9ff15a0744d6b71c9196e67d8f1e07dbde6c14130fa812cd030"
I1123 08:43:38.854603 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:38.854606 206485 cri.go:89] found id: ""
I1123 08:43:38.854613 206485 logs.go:282] 3 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 89f5abdf45afb9ff15a0744d6b71c9196e67d8f1e07dbde6c14130fa812cd030 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:38.854688 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.858901 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.862744 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.866475 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:38.866533 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:38.892493 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:38.892520 206485 cri.go:89] found id: ""
I1123 08:43:38.892528 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:38.892575 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.896728 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:38.896790 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:38.923307 206485 cri.go:89] found id: ""
I1123 08:43:38.923331 206485 logs.go:282] 0 containers: []
W1123 08:43:38.923340 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:38.923346 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:38.923392 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:38.949371 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:38.949396 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:38.949401 206485 cri.go:89] found id: ""
I1123 08:43:38.949407 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:38.949452 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.953461 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:38.957266 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:38.957315 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:38.984054 206485 cri.go:89] found id: ""
I1123 08:43:38.984077 206485 logs.go:282] 0 containers: []
W1123 08:43:38.984084 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:38.984090 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:38.984144 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:39.014867 206485 cri.go:89] found id: "7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:39.014894 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:39.014900 206485 cri.go:89] found id: ""
I1123 08:43:39.014909 206485 logs.go:282] 2 containers: [7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:39.014988 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:39.019876 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:39.024471 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:39.024545 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:39.056343 206485 cri.go:89] found id: ""
I1123 08:43:39.056370 206485 logs.go:282] 0 containers: []
W1123 08:43:39.056382 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:39.056390 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:39.056447 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:39.087173 206485 cri.go:89] found id: ""
I1123 08:43:39.087200 206485 logs.go:282] 0 containers: []
W1123 08:43:39.087209 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:39.087218 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:39.087230 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:39.143340 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:39.143373 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:39.182502 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:39.182538 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:39.220490 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:39.220526 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:39.279713 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:39.279751 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:39.296632 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:39.296672 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:39.369445 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:39.369477 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:39.369493 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:39.412743 206485 logs.go:123] Gathering logs for kube-controller-manager [7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb] ...
I1123 08:43:39.412782 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:39.445988 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:39.446015 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:39.482074 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:39.482110 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:39.578994 206485 logs.go:123] Gathering logs for kube-apiserver [89f5abdf45afb9ff15a0744d6b71c9196e67d8f1e07dbde6c14130fa812cd030] ...
I1123 08:43:39.579036 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 89f5abdf45afb9ff15a0744d6b71c9196e67d8f1e07dbde6c14130fa812cd030"
I1123 08:43:39.619624 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:39.619684 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:39.661136 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:39.661175 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:42.204267 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:42.204712 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:42.204771 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:42.204826 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:42.232709 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:42.232730 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:42.232735 206485 cri.go:89] found id: ""
I1123 08:43:42.232744 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:42.232799 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.236622 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.240968 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:42.241028 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:42.281849 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:42.281877 206485 cri.go:89] found id: ""
I1123 08:43:42.281885 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:42.281942 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.287991 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:42.288063 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:42.327625 206485 cri.go:89] found id: ""
I1123 08:43:42.327669 206485 logs.go:282] 0 containers: []
W1123 08:43:42.327679 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:42.327687 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:42.327768 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:39.015203 258086 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-proxy_v1.34.1: (1.910026064s)
I1123 08:43:39.015228 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-proxy_v1.34.1 from cache
I1123 08:43:39.015249 258086 containerd.go:285] Loading image: /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1123 08:43:39.015286 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-controller-manager_v1.34.1
I1123 08:43:39.015301 258086 ssh_runner.go:235] Completed: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5: (1.881356677s)
I1123 08:43:39.015367 258086 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl rmi gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:43:39.981839 258086 cache_images.go:291] Loading image from: /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5
I1123 08:43:39.981862 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-controller-manager_v1.34.1 from cache
I1123 08:43:39.981901 258086 containerd.go:285] Loading image: /var/lib/minikube/images/kube-apiserver_v1.34.1
I1123 08:43:39.981948 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5
I1123 08:43:39.981955 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1
I1123 08:43:39.985933 258086 ssh_runner.go:352] existence check for /var/lib/minikube/images/storage-provisioner_v5: stat -c "%s %y" /var/lib/minikube/images/storage-provisioner_v5: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/images/storage-provisioner_v5': No such file or directory
I1123 08:43:39.985965 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 --> /var/lib/minikube/images/storage-provisioner_v5 (9060352 bytes)
I1123 08:43:41.077380 258086 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/kube-apiserver_v1.34.1: (1.095406466s)
I1123 08:43:41.077408 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/kube-apiserver_v1.34.1 from cache
I1123 08:43:41.077435 258086 containerd.go:285] Loading image: /var/lib/minikube/images/etcd_3.6.4-0
I1123 08:43:41.077497 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0
I1123 08:43:42.358205 254114 addons.go:239] Setting addon default-storageclass=true in "old-k8s-version-204346"
I1123 08:43:42.358246 254114 host.go:66] Checking if "old-k8s-version-204346" exists ...
I1123 08:43:42.358752 254114 cli_runner.go:164] Run: docker container inspect old-k8s-version-204346 --format={{.State.Status}}
I1123 08:43:42.359206 254114 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:43:42.359225 254114 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1123 08:43:42.359285 254114 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-204346
I1123 08:43:42.389614 254114 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1123 08:43:42.389635 254114 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1123 08:43:42.389707 254114 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" old-k8s-version-204346
I1123 08:43:42.391185 254114 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/old-k8s-version-204346/id_rsa Username:docker}
I1123 08:43:42.422459 254114 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33058 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/old-k8s-version-204346/id_rsa Username:docker}
I1123 08:43:42.449217 254114 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.103.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1123 08:43:42.517611 254114 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:43:42.534960 254114 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:43:42.564953 254114 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1123 08:43:42.780756 254114 start.go:977] {"host.minikube.internal": 192.168.103.1} host record injected into CoreDNS's ConfigMap
I1123 08:43:42.781954 254114 node_ready.go:35] waiting up to 6m0s for node "old-k8s-version-204346" to be "Ready" ...
I1123 08:43:43.034443 254114 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1123 08:43:43.035744 254114 addons.go:530] duration metric: took 707.164659ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1123 08:43:42.368955 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:42.368979 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:42.368985 206485 cri.go:89] found id: ""
I1123 08:43:42.368996 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:42.370472 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.378043 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.388658 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:42.388749 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:42.429522 206485 cri.go:89] found id: ""
I1123 08:43:42.429549 206485 logs.go:282] 0 containers: []
W1123 08:43:42.429559 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:42.429566 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:42.429632 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:42.469043 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:42.469070 206485 cri.go:89] found id: "7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:42.469076 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:42.469081 206485 cri.go:89] found id: ""
I1123 08:43:42.469089 206485 logs.go:282] 3 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:42.469144 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.475315 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.481874 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:42.488696 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:42.488921 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:42.533856 206485 cri.go:89] found id: ""
I1123 08:43:42.533914 206485 logs.go:282] 0 containers: []
W1123 08:43:42.533926 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:42.533934 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:42.534029 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:42.577521 206485 cri.go:89] found id: ""
I1123 08:43:42.577543 206485 logs.go:282] 0 containers: []
W1123 08:43:42.577550 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:42.577559 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:42.577568 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:42.665576 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:42.665601 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:42.665622 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:42.723908 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:42.723945 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:42.766588 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:42.766618 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:42.815960 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:42.816050 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:42.836362 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:42.836393 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:42.883211 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:42.883249 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:42.925983 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:42.926057 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:43.002532 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:43.002565 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:43.048891 206485 logs.go:123] Gathering logs for kube-controller-manager [7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb] ...
I1123 08:43:43.048923 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:43.080573 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:43.080606 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:43.145471 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:43.145510 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:43.182994 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:43.183035 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:45.803715 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:45.804092 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:45.804151 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:45.804211 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:45.842142 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:45.842161 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:45.842165 206485 cri.go:89] found id: ""
I1123 08:43:45.842172 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:45.842223 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:45.846225 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:45.850730 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:45.850797 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:45.879479 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:45.879506 206485 cri.go:89] found id: ""
I1123 08:43:45.879515 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:45.879576 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:45.884738 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:45.884801 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:45.916040 206485 cri.go:89] found id: ""
I1123 08:43:45.916069 206485 logs.go:282] 0 containers: []
W1123 08:43:45.916080 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:45.916088 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:45.916155 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:45.947206 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:45.947237 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:45.947242 206485 cri.go:89] found id: ""
I1123 08:43:45.947252 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:45.947308 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:45.952246 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:45.956172 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:45.956233 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:45.986919 206485 cri.go:89] found id: ""
I1123 08:43:45.986945 206485 logs.go:282] 0 containers: []
W1123 08:43:45.986956 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:45.986964 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:45.987017 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:46.019241 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:46.019269 206485 cri.go:89] found id: "7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:46.019273 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:46.019278 206485 cri.go:89] found id: ""
I1123 08:43:46.019286 206485 logs.go:282] 3 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:46.019345 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:46.024190 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:46.028847 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:46.033363 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:46.033436 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:46.067781 206485 cri.go:89] found id: ""
I1123 08:43:46.067808 206485 logs.go:282] 0 containers: []
W1123 08:43:46.067819 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:46.067827 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:46.067885 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:46.100053 206485 cri.go:89] found id: ""
I1123 08:43:46.100084 206485 logs.go:282] 0 containers: []
W1123 08:43:46.100094 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:46.100107 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:46.100122 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:46.146426 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:46.146456 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:46.208332 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:46.208375 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:46.247193 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:46.247229 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:46.264714 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:46.264742 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:46.336341 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:46.336363 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:46.336376 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:46.379827 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:46.379866 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:46.425899 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:46.425925 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:46.491769 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:46.491805 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:46.523775 206485 logs.go:123] Gathering logs for kube-controller-manager [7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb] ...
I1123 08:43:46.523805 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:46.555025 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:46.555060 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:46.592667 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:46.592709 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:46.691047 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:46.691081 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:43.958800 258086 ssh_runner.go:235] Completed: sudo ctr -n=k8s.io images import /var/lib/minikube/images/etcd_3.6.4-0: (2.881269634s)
I1123 08:43:43.958835 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/registry.k8s.io/etcd_3.6.4-0 from cache
I1123 08:43:43.958864 258086 containerd.go:285] Loading image: /var/lib/minikube/images/storage-provisioner_v5
I1123 08:43:43.958908 258086 ssh_runner.go:195] Run: sudo ctr -n=k8s.io images import /var/lib/minikube/images/storage-provisioner_v5
I1123 08:43:44.336453 258086 cache_images.go:323] Transferred and loaded /home/jenkins/minikube-integration/21969-13876/.minikube/cache/images/amd64/gcr.io/k8s-minikube/storage-provisioner_v5 from cache
I1123 08:43:44.336514 258086 cache_images.go:125] Successfully loaded all cached images
I1123 08:43:44.336522 258086 cache_images.go:94] duration metric: took 10.177118s to LoadCachedImages
I1123 08:43:44.336535 258086 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.34.1 containerd true true} ...
I1123 08:43:44.336675 258086 kubeadm.go:947] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-999106 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-999106 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1123 08:43:44.336740 258086 ssh_runner.go:195] Run: sudo crictl info
I1123 08:43:44.361999 258086 cni.go:84] Creating CNI manager for ""
I1123 08:43:44.362021 258086 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:43:44.362037 258086 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1123 08:43:44.362060 258086 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-999106 NodeName:no-preload-999106 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPa
th:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1123 08:43:44.362197 258086 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.85.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "no-preload-999106"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.85.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1123 08:43:44.362266 258086 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1123 08:43:44.371147 258086 binaries.go:54] Didn't find k8s binaries: sudo ls /var/lib/minikube/binaries/v1.34.1: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/binaries/v1.34.1': No such file or directory
Initiating transfer...
I1123 08:43:44.371205 258086 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/binaries/v1.34.1
I1123 08:43:44.379477 258086 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
I1123 08:43:44.379559 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl
I1123 08:43:44.379560 258086 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubelet.sha256 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/linux/amd64/v1.34.1/kubelet
I1123 08:43:44.379590 258086 download.go:108] Downloading: https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubeadm.sha256 -> /home/jenkins/minikube-integration/21969-13876/.minikube/cache/linux/amd64/v1.34.1/kubeadm
I1123 08:43:44.384906 258086 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubectl: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubectl: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubectl': No such file or directory
I1123 08:43:44.384935 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/linux/amd64/v1.34.1/kubectl --> /var/lib/minikube/binaries/v1.34.1/kubectl (60559544 bytes)
I1123 08:43:45.307760 258086 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 08:43:45.321272 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet
I1123 08:43:45.325776 258086 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubelet: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubelet: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubelet': No such file or directory
I1123 08:43:45.325807 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/linux/amd64/v1.34.1/kubelet --> /var/lib/minikube/binaries/v1.34.1/kubelet (59195684 bytes)
I1123 08:43:45.440984 258086 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm
I1123 08:43:45.448490 258086 ssh_runner.go:352] existence check for /var/lib/minikube/binaries/v1.34.1/kubeadm: stat -c "%s %y" /var/lib/minikube/binaries/v1.34.1/kubeadm: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/binaries/v1.34.1/kubeadm': No such file or directory
I1123 08:43:45.448546 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/cache/linux/amd64/v1.34.1/kubeadm --> /var/lib/minikube/binaries/v1.34.1/kubeadm (74027192 bytes)
I1123 08:43:45.718942 258086 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1123 08:43:45.729752 258086 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (321 bytes)
I1123 08:43:45.746904 258086 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1123 08:43:45.764606 258086 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2229 bytes)
I1123 08:43:45.779438 258086 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I1123 08:43:45.783637 258086 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:43:45.795787 258086 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:43:45.901866 258086 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:43:45.931680 258086 certs.go:69] Setting up /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106 for IP: 192.168.85.2
I1123 08:43:45.931702 258086 certs.go:195] generating shared ca certs ...
I1123 08:43:45.931722 258086 certs.go:227] acquiring lock for ca certs: {Name:mk376e2c25eb30d8b09b93cb4624441e819bcc8f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:45.931883 258086 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21969-13876/.minikube/ca.key
I1123 08:43:45.931922 258086 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21969-13876/.minikube/proxy-client-ca.key
I1123 08:43:45.931931 258086 certs.go:257] generating profile certs ...
I1123 08:43:45.932023 258086 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.key
I1123 08:43:45.932046 258086 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.crt with IP's: []
I1123 08:43:46.076820 258086 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.crt ...
I1123 08:43:46.076852 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.crt: {Name:mk264e21cffc1d235a0a5153e1f533874608a488 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.077062 258086 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.key ...
I1123 08:43:46.077094 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/client.key: {Name:mk09f5a31cd584eb4ea102a803f662bacda0e612 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.077204 258086 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key.ff765c4c
I1123 08:43:46.077226 258086 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt.ff765c4c with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I1123 08:43:46.147038 258086 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt.ff765c4c ...
I1123 08:43:46.147076 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt.ff765c4c: {Name:mk2b60ecfaddc28f6e9e91bd0ff2b48be7ad7023 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.147257 258086 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key.ff765c4c ...
I1123 08:43:46.147277 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key.ff765c4c: {Name:mk8ce7b23d7c04fba7d8d30f580f5ae25a8eaa1e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.147393 258086 certs.go:382] copying /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt.ff765c4c -> /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt
I1123 08:43:46.147504 258086 certs.go:386] copying /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key.ff765c4c -> /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key
I1123 08:43:46.147597 258086 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.key
I1123 08:43:46.147614 258086 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.crt with IP's: []
I1123 08:43:46.188254 258086 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.crt ...
I1123 08:43:46.188285 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.crt: {Name:mkce831c55c8c6f96bdb743bd92d80212f28ceec Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.188486 258086 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.key ...
I1123 08:43:46.188506 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.key: {Name:mk2b9a4c76ac3acf445fdcb1e14850de2c1a5507 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:43:46.188762 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/17442.pem (1338 bytes)
W1123 08:43:46.188820 258086 certs.go:480] ignoring /home/jenkins/minikube-integration/21969-13876/.minikube/certs/17442_empty.pem, impossibly tiny 0 bytes
I1123 08:43:46.188836 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca-key.pem (1675 bytes)
I1123 08:43:46.188874 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/ca.pem (1078 bytes)
I1123 08:43:46.188907 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/cert.pem (1123 bytes)
I1123 08:43:46.188942 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/certs/key.pem (1675 bytes)
I1123 08:43:46.189009 258086 certs.go:484] found cert: /home/jenkins/minikube-integration/21969-13876/.minikube/files/etc/ssl/certs/174422.pem (1708 bytes)
I1123 08:43:46.189889 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1123 08:43:46.212738 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1123 08:43:46.235727 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1123 08:43:46.259309 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1123 08:43:46.282164 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1123 08:43:46.305443 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1123 08:43:46.328998 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1123 08:43:46.351947 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/profiles/no-preload-999106/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1123 08:43:46.375511 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1123 08:43:46.401909 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/certs/17442.pem --> /usr/share/ca-certificates/17442.pem (1338 bytes)
I1123 08:43:46.424180 258086 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21969-13876/.minikube/files/etc/ssl/certs/174422.pem --> /usr/share/ca-certificates/174422.pem (1708 bytes)
I1123 08:43:46.445575 258086 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1123 08:43:46.461580 258086 ssh_runner.go:195] Run: openssl version
I1123 08:43:46.468524 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1123 08:43:46.477534 258086 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1123 08:43:46.482510 258086 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 08:11 /usr/share/ca-certificates/minikubeCA.pem
I1123 08:43:46.482577 258086 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1123 08:43:46.523991 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1123 08:43:46.535125 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/17442.pem && ln -fs /usr/share/ca-certificates/17442.pem /etc/ssl/certs/17442.pem"
I1123 08:43:46.546052 258086 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/17442.pem
I1123 08:43:46.552569 258086 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:16 /usr/share/ca-certificates/17442.pem
I1123 08:43:46.552702 258086 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/17442.pem
I1123 08:43:46.600806 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/17442.pem /etc/ssl/certs/51391683.0"
I1123 08:43:46.610524 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/174422.pem && ln -fs /usr/share/ca-certificates/174422.pem /etc/ssl/certs/174422.pem"
I1123 08:43:46.621451 258086 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/174422.pem
I1123 08:43:46.625905 258086 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:16 /usr/share/ca-certificates/174422.pem
I1123 08:43:46.625966 258086 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/174422.pem
I1123 08:43:46.663055 258086 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/174422.pem /etc/ssl/certs/3ec20f2e.0"
I1123 08:43:46.672614 258086 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1123 08:43:46.676799 258086 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1123 08:43:46.676865 258086 kubeadm.go:401] StartCluster: {Name:no-preload-999106 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-999106 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APISer
verNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirm
warePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:43:46.676948 258086 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I1123 08:43:46.677027 258086 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1123 08:43:46.706515 258086 cri.go:89] found id: ""
I1123 08:43:46.706599 258086 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1123 08:43:46.715791 258086 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1123 08:43:46.725599 258086 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I1123 08:43:46.725695 258086 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1123 08:43:46.734727 258086 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1123 08:43:46.734752 258086 kubeadm.go:158] found existing configuration files:
I1123 08:43:46.734794 258086 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1123 08:43:46.743841 258086 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1123 08:43:46.743892 258086 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1123 08:43:46.752521 258086 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1123 08:43:46.761347 258086 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1123 08:43:46.761400 258086 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1123 08:43:46.769196 258086 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1123 08:43:46.777174 258086 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1123 08:43:46.777227 258086 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1123 08:43:46.784869 258086 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1123 08:43:46.793707 258086 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1123 08:43:46.793768 258086 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1123 08:43:46.801586 258086 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1123 08:43:46.858285 258086 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1044-gcp\n", err: exit status 1
I1123 08:43:46.916186 258086 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1123 08:43:43.286172 254114 kapi.go:214] "coredns" deployment in "kube-system" namespace and "old-k8s-version-204346" context rescaled to 1 replicas
W1123 08:43:44.785588 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
W1123 08:43:46.785746 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
I1123 08:43:49.228668 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:49.229070 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:49.229121 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:49.229170 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:49.256973 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:49.256994 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:49.257000 206485 cri.go:89] found id: ""
I1123 08:43:49.257008 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:49.257070 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.261237 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.264766 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:49.264830 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:49.290113 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:49.290135 206485 cri.go:89] found id: ""
I1123 08:43:49.290145 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:49.290199 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.293989 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:49.294053 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:49.320161 206485 cri.go:89] found id: ""
I1123 08:43:49.320191 206485 logs.go:282] 0 containers: []
W1123 08:43:49.320202 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:49.320210 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:49.320264 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:49.347363 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:49.347384 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:49.347391 206485 cri.go:89] found id: ""
I1123 08:43:49.347407 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:49.347464 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.351525 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.355374 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:49.355433 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:49.382984 206485 cri.go:89] found id: ""
I1123 08:43:49.383010 206485 logs.go:282] 0 containers: []
W1123 08:43:49.383020 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:49.383028 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:49.383086 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:49.409377 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:49.409402 206485 cri.go:89] found id: "7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:49.409408 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:49.409413 206485 cri.go:89] found id: ""
I1123 08:43:49.409421 206485 logs.go:282] 3 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:49.409468 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.413850 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.417701 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:49.421307 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:49.421373 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:49.447409 206485 cri.go:89] found id: ""
I1123 08:43:49.447433 206485 logs.go:282] 0 containers: []
W1123 08:43:49.447444 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:49.447451 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:49.447512 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:49.474526 206485 cri.go:89] found id: ""
I1123 08:43:49.474554 206485 logs.go:282] 0 containers: []
W1123 08:43:49.474562 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:49.474572 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:49.474580 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:49.566947 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:49.566990 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:49.581192 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:49.581218 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:49.640574 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:49.640596 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:49.640610 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:49.676070 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:49.676097 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:49.710524 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:49.710555 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:49.785389 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:49.785422 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:49.819651 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:49.819677 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:49.847192 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:49.847216 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:49.878622 206485 logs.go:123] Gathering logs for kube-controller-manager [7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb] ...
I1123 08:43:49.878674 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 7629f2a7eb00cde594bb5ce8d8a3080ec5e16484bb96c70953456b9ad4f543bb"
I1123 08:43:49.904924 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:49.904958 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:49.937225 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:49.937252 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:49.987441 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:49.987483 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
W1123 08:43:49.285708 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
W1123 08:43:51.285827 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
I1123 08:43:56.990600 258086 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1123 08:43:56.990724 258086 kubeadm.go:319] [preflight] Running pre-flight checks
I1123 08:43:56.990889 258086 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I1123 08:43:56.990976 258086 kubeadm.go:319] [0;37mKERNEL_VERSION[0m: [0;32m6.8.0-1044-gcp[0m
I1123 08:43:56.991027 258086 kubeadm.go:319] [0;37mOS[0m: [0;32mLinux[0m
I1123 08:43:56.991098 258086 kubeadm.go:319] [0;37mCGROUPS_CPU[0m: [0;32menabled[0m
I1123 08:43:56.991170 258086 kubeadm.go:319] [0;37mCGROUPS_CPUSET[0m: [0;32menabled[0m
I1123 08:43:56.991327 258086 kubeadm.go:319] [0;37mCGROUPS_DEVICES[0m: [0;32menabled[0m
I1123 08:43:56.991401 258086 kubeadm.go:319] [0;37mCGROUPS_FREEZER[0m: [0;32menabled[0m
I1123 08:43:56.991513 258086 kubeadm.go:319] [0;37mCGROUPS_MEMORY[0m: [0;32menabled[0m
I1123 08:43:56.991594 258086 kubeadm.go:319] [0;37mCGROUPS_PIDS[0m: [0;32menabled[0m
I1123 08:43:56.991696 258086 kubeadm.go:319] [0;37mCGROUPS_HUGETLB[0m: [0;32menabled[0m
I1123 08:43:56.991760 258086 kubeadm.go:319] [0;37mCGROUPS_IO[0m: [0;32menabled[0m
I1123 08:43:56.991928 258086 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1123 08:43:56.992079 258086 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1123 08:43:56.992203 258086 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1123 08:43:56.992277 258086 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1123 08:43:56.993629 258086 out.go:252] - Generating certificates and keys ...
I1123 08:43:56.993773 258086 kubeadm.go:319] [certs] Using existing ca certificate authority
I1123 08:43:56.993882 258086 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1123 08:43:56.993978 258086 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1123 08:43:56.994054 258086 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1123 08:43:56.994139 258086 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1123 08:43:56.994210 258086 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1123 08:43:56.994287 258086 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1123 08:43:56.994448 258086 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost no-preload-999106] and IPs [192.168.85.2 127.0.0.1 ::1]
I1123 08:43:56.994523 258086 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1123 08:43:56.994701 258086 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost no-preload-999106] and IPs [192.168.85.2 127.0.0.1 ::1]
I1123 08:43:56.994808 258086 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1123 08:43:56.994907 258086 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1123 08:43:56.994974 258086 kubeadm.go:319] [certs] Generating "sa" key and public key
I1123 08:43:56.995052 258086 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1123 08:43:56.995136 258086 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1123 08:43:56.995230 258086 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1123 08:43:56.995314 258086 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1123 08:43:56.995407 258086 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1123 08:43:56.995507 258086 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1123 08:43:56.995596 258086 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1123 08:43:56.995670 258086 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1123 08:43:56.998197 258086 out.go:252] - Booting up control plane ...
I1123 08:43:56.998282 258086 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1123 08:43:56.998367 258086 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1123 08:43:56.998479 258086 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1123 08:43:56.998614 258086 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1123 08:43:56.998760 258086 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1123 08:43:56.998861 258086 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1123 08:43:56.998949 258086 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1123 08:43:56.998984 258086 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1123 08:43:56.999108 258086 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1123 08:43:56.999224 258086 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1123 08:43:56.999284 258086 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 501.962401ms
I1123 08:43:56.999376 258086 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1123 08:43:56.999453 258086 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.85.2:8443/livez
I1123 08:43:56.999531 258086 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1123 08:43:56.999598 258086 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1123 08:43:56.999680 258086 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 1.69972236s
I1123 08:43:56.999756 258086 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 1.979262438s
I1123 08:43:56.999857 258086 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 3.502236354s
I1123 08:43:56.999983 258086 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1123 08:43:57.000181 258086 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1123 08:43:57.000269 258086 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1123 08:43:57.000528 258086 kubeadm.go:319] [mark-control-plane] Marking the node no-preload-999106 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1123 08:43:57.000596 258086 kubeadm.go:319] [bootstrap-token] Using token: augmq1.wtvrtjusohbhz9fp
I1123 08:43:57.002234 258086 out.go:252] - Configuring RBAC rules ...
I1123 08:43:57.002330 258086 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1123 08:43:57.002408 258086 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1123 08:43:57.002539 258086 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1123 08:43:57.002709 258086 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1123 08:43:57.002823 258086 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1123 08:43:57.002898 258086 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1123 08:43:57.003040 258086 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1123 08:43:57.003091 258086 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1123 08:43:57.003157 258086 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1123 08:43:57.003173 258086 kubeadm.go:319]
I1123 08:43:57.003224 258086 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1123 08:43:57.003229 258086 kubeadm.go:319]
I1123 08:43:57.003293 258086 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1123 08:43:57.003299 258086 kubeadm.go:319]
I1123 08:43:57.003325 258086 kubeadm.go:319] mkdir -p $HOME/.kube
I1123 08:43:57.003380 258086 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1123 08:43:57.003424 258086 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1123 08:43:57.003429 258086 kubeadm.go:319]
I1123 08:43:57.003474 258086 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1123 08:43:57.003483 258086 kubeadm.go:319]
I1123 08:43:57.003523 258086 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1123 08:43:57.003529 258086 kubeadm.go:319]
I1123 08:43:57.003586 258086 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1123 08:43:57.003674 258086 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1123 08:43:57.003774 258086 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1123 08:43:57.003795 258086 kubeadm.go:319]
I1123 08:43:57.003914 258086 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1123 08:43:57.004021 258086 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1123 08:43:57.004031 258086 kubeadm.go:319]
I1123 08:43:57.004153 258086 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token augmq1.wtvrtjusohbhz9fp \
I1123 08:43:57.004275 258086 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:5c48a3b11504a9c7a5d242d913eadf6a5354a8cb06c9ffcf8385d22efb04d8fa \
I1123 08:43:57.004298 258086 kubeadm.go:319] --control-plane
I1123 08:43:57.004302 258086 kubeadm.go:319]
I1123 08:43:57.004373 258086 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1123 08:43:57.004379 258086 kubeadm.go:319]
I1123 08:43:57.004452 258086 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token augmq1.wtvrtjusohbhz9fp \
I1123 08:43:57.004563 258086 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:5c48a3b11504a9c7a5d242d913eadf6a5354a8cb06c9ffcf8385d22efb04d8fa
I1123 08:43:57.004575 258086 cni.go:84] Creating CNI manager for ""
I1123 08:43:57.004581 258086 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I1123 08:43:57.007194 258086 out.go:179] * Configuring CNI (Container Networking Interface) ...
I1123 08:43:52.520061 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:52.520694 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:52.520747 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:52.520799 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:52.553943 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:52.553969 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:52.553975 206485 cri.go:89] found id: ""
I1123 08:43:52.553983 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:52.554042 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.559842 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.565197 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:52.565266 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:52.601499 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:52.601529 206485 cri.go:89] found id: ""
I1123 08:43:52.601568 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:52.601621 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.606848 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:52.606925 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:52.645028 206485 cri.go:89] found id: ""
I1123 08:43:52.645061 206485 logs.go:282] 0 containers: []
W1123 08:43:52.645072 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:52.645079 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:52.645139 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:52.681457 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:52.681484 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:52.681490 206485 cri.go:89] found id: ""
I1123 08:43:52.681499 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:52.681557 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.686548 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.690588 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:52.690682 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:52.723180 206485 cri.go:89] found id: ""
I1123 08:43:52.723208 206485 logs.go:282] 0 containers: []
W1123 08:43:52.723217 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:52.723224 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:52.723287 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:52.756887 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:52.756911 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:52.756921 206485 cri.go:89] found id: ""
I1123 08:43:52.756929 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:52.756985 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.761180 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:52.765188 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:52.765247 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:52.795290 206485 cri.go:89] found id: ""
I1123 08:43:52.795319 206485 logs.go:282] 0 containers: []
W1123 08:43:52.795329 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:52.795336 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:52.795395 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:52.822978 206485 cri.go:89] found id: ""
I1123 08:43:52.823006 206485 logs.go:282] 0 containers: []
W1123 08:43:52.823013 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:52.823022 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:52.823034 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:52.859205 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:52.859240 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:52.910295 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:52.910334 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:52.948004 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:52.948045 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:52.982700 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:52.982734 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:53.055592 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:53.055634 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:53.097286 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:53.097327 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:53.133102 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:53.133146 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:53.170688 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:53.170722 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:53.281419 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:53.281464 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:53.298748 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:53.298777 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:53.373016 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:53.373040 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:53.373054 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:55.914776 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:55.915250 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:55.915303 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:55.915351 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:55.943544 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:55.943567 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:55.943572 206485 cri.go:89] found id: ""
I1123 08:43:55.943579 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:55.943622 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:55.948391 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:55.952924 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:55.952992 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:55.981407 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:55.981431 206485 cri.go:89] found id: ""
I1123 08:43:55.981441 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:55.981501 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:55.986304 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:55.986378 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:56.014167 206485 cri.go:89] found id: ""
I1123 08:43:56.014192 206485 logs.go:282] 0 containers: []
W1123 08:43:56.014200 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:56.014206 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:56.014262 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:56.050121 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:56.050153 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:56.050160 206485 cri.go:89] found id: ""
I1123 08:43:56.050170 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:56.050236 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:56.055306 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:56.059507 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:56.059586 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:56.092810 206485 cri.go:89] found id: ""
I1123 08:43:56.092843 206485 logs.go:282] 0 containers: []
W1123 08:43:56.092856 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:56.092864 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:56.092931 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:56.126845 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:56.126869 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:56.126874 206485 cri.go:89] found id: ""
I1123 08:43:56.126884 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:56.126939 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:56.131943 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:56.135880 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:56.135945 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:56.163669 206485 cri.go:89] found id: ""
I1123 08:43:56.163696 206485 logs.go:282] 0 containers: []
W1123 08:43:56.163707 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:56.163714 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:56.163773 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:56.197602 206485 cri.go:89] found id: ""
I1123 08:43:56.197638 206485 logs.go:282] 0 containers: []
W1123 08:43:56.197660 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:56.197672 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:56.197689 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:56.238940 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:56.238981 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:56.288636 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:56.288691 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:56.324266 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:56.324299 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:56.378458 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:56.378498 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:56.417284 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:56.417313 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:56.509149 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:56.509182 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:56.523057 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:56.523082 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:56.583048 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:56.583074 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:56.583095 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:56.618320 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:56.618358 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:56.651682 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:56.651713 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:56.709657 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:56.709694 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:57.008714 258086 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1123 08:43:57.013402 258086 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.1/kubectl ...
I1123 08:43:57.013443 258086 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1123 08:43:57.028881 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1123 08:43:57.253419 258086 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1123 08:43:57.253530 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:57.253599 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes no-preload-999106 minikube.k8s.io/updated_at=2025_11_23T08_43_57_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=50c3a8a3c03e8a84b6c978a884d21c3de8c6d4f1 minikube.k8s.io/name=no-preload-999106 minikube.k8s.io/primary=true
I1123 08:43:57.264168 258086 ops.go:34] apiserver oom_adj: -16
I1123 08:43:57.330032 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
W1123 08:43:53.286319 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
W1123 08:43:55.786003 254114 node_ready.go:57] node "old-k8s-version-204346" has "Ready":"False" status (will retry)
I1123 08:43:57.285411 254114 node_ready.go:49] node "old-k8s-version-204346" is "Ready"
I1123 08:43:57.285445 254114 node_ready.go:38] duration metric: took 14.503433565s for node "old-k8s-version-204346" to be "Ready" ...
I1123 08:43:57.285462 254114 api_server.go:52] waiting for apiserver process to appear ...
I1123 08:43:57.285564 254114 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:43:57.301686 254114 api_server.go:72] duration metric: took 14.973147695s to wait for apiserver process to appear ...
I1123 08:43:57.301718 254114 api_server.go:88] waiting for apiserver healthz status ...
I1123 08:43:57.301742 254114 api_server.go:253] Checking apiserver healthz at https://192.168.103.2:8443/healthz ...
I1123 08:43:57.306545 254114 api_server.go:279] https://192.168.103.2:8443/healthz returned 200:
ok
I1123 08:43:57.308093 254114 api_server.go:141] control plane version: v1.28.0
I1123 08:43:57.308124 254114 api_server.go:131] duration metric: took 6.398178ms to wait for apiserver health ...
I1123 08:43:57.308135 254114 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 08:43:57.312486 254114 system_pods.go:59] 8 kube-system pods found
I1123 08:43:57.312519 254114 system_pods.go:61] "coredns-5dd5756b68-2fdsv" [1c71e052-b3c2-4875-8aeb-7d724ee26e06] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:43:57.312525 254114 system_pods.go:61] "etcd-old-k8s-version-204346" [58cc20a4-23f1-4a5a-ba0a-03fadfc6df09] Running
I1123 08:43:57.312530 254114 system_pods.go:61] "kindnet-q8xnm" [c3178adf-8eb3-4210-9674-fdda89d3317d] Running
I1123 08:43:57.312539 254114 system_pods.go:61] "kube-apiserver-old-k8s-version-204346" [e63e828c-37a0-48ab-9413-932b3cde09cc] Running
I1123 08:43:57.312542 254114 system_pods.go:61] "kube-controller-manager-old-k8s-version-204346" [bbaefdad-f8f3-4264-a467-5f75937de2a0] Running
I1123 08:43:57.312546 254114 system_pods.go:61] "kube-proxy-tzq9b" [5d122719-2577-438f-bae7-72a1034f88ef] Running
I1123 08:43:57.312548 254114 system_pods.go:61] "kube-scheduler-old-k8s-version-204346" [773bcc91-2553-4606-91ab-f32ec0ba3738] Running
I1123 08:43:57.312553 254114 system_pods.go:61] "storage-provisioner" [372382d8-d23f-4e6d-89ae-8f2c9c46b6dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:43:57.312559 254114 system_pods.go:74] duration metric: took 4.418082ms to wait for pod list to return data ...
I1123 08:43:57.312566 254114 default_sa.go:34] waiting for default service account to be created ...
I1123 08:43:57.315607 254114 default_sa.go:45] found service account: "default"
I1123 08:43:57.315634 254114 default_sa.go:55] duration metric: took 3.061615ms for default service account to be created ...
I1123 08:43:57.315674 254114 system_pods.go:116] waiting for k8s-apps to be running ...
I1123 08:43:57.320602 254114 system_pods.go:86] 8 kube-system pods found
I1123 08:43:57.320629 254114 system_pods.go:89] "coredns-5dd5756b68-2fdsv" [1c71e052-b3c2-4875-8aeb-7d724ee26e06] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:43:57.320634 254114 system_pods.go:89] "etcd-old-k8s-version-204346" [58cc20a4-23f1-4a5a-ba0a-03fadfc6df09] Running
I1123 08:43:57.320639 254114 system_pods.go:89] "kindnet-q8xnm" [c3178adf-8eb3-4210-9674-fdda89d3317d] Running
I1123 08:43:57.320657 254114 system_pods.go:89] "kube-apiserver-old-k8s-version-204346" [e63e828c-37a0-48ab-9413-932b3cde09cc] Running
I1123 08:43:57.320663 254114 system_pods.go:89] "kube-controller-manager-old-k8s-version-204346" [bbaefdad-f8f3-4264-a467-5f75937de2a0] Running
I1123 08:43:57.320668 254114 system_pods.go:89] "kube-proxy-tzq9b" [5d122719-2577-438f-bae7-72a1034f88ef] Running
I1123 08:43:57.320673 254114 system_pods.go:89] "kube-scheduler-old-k8s-version-204346" [773bcc91-2553-4606-91ab-f32ec0ba3738] Running
I1123 08:43:57.320679 254114 system_pods.go:89] "storage-provisioner" [372382d8-d23f-4e6d-89ae-8f2c9c46b6dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:43:57.320708 254114 retry.go:31] will retry after 281.398987ms: missing components: kube-dns
I1123 08:43:57.607881 254114 system_pods.go:86] 8 kube-system pods found
I1123 08:43:57.607919 254114 system_pods.go:89] "coredns-5dd5756b68-2fdsv" [1c71e052-b3c2-4875-8aeb-7d724ee26e06] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:43:57.607927 254114 system_pods.go:89] "etcd-old-k8s-version-204346" [58cc20a4-23f1-4a5a-ba0a-03fadfc6df09] Running
I1123 08:43:57.607936 254114 system_pods.go:89] "kindnet-q8xnm" [c3178adf-8eb3-4210-9674-fdda89d3317d] Running
I1123 08:43:57.607942 254114 system_pods.go:89] "kube-apiserver-old-k8s-version-204346" [e63e828c-37a0-48ab-9413-932b3cde09cc] Running
I1123 08:43:57.607948 254114 system_pods.go:89] "kube-controller-manager-old-k8s-version-204346" [bbaefdad-f8f3-4264-a467-5f75937de2a0] Running
I1123 08:43:57.607952 254114 system_pods.go:89] "kube-proxy-tzq9b" [5d122719-2577-438f-bae7-72a1034f88ef] Running
I1123 08:43:57.607957 254114 system_pods.go:89] "kube-scheduler-old-k8s-version-204346" [773bcc91-2553-4606-91ab-f32ec0ba3738] Running
I1123 08:43:57.607964 254114 system_pods.go:89] "storage-provisioner" [372382d8-d23f-4e6d-89ae-8f2c9c46b6dc] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1123 08:43:57.607991 254114 retry.go:31] will retry after 389.750642ms: missing components: kube-dns
I1123 08:43:58.002207 254114 system_pods.go:86] 8 kube-system pods found
I1123 08:43:58.002234 254114 system_pods.go:89] "coredns-5dd5756b68-2fdsv" [1c71e052-b3c2-4875-8aeb-7d724ee26e06] Running
I1123 08:43:58.002240 254114 system_pods.go:89] "etcd-old-k8s-version-204346" [58cc20a4-23f1-4a5a-ba0a-03fadfc6df09] Running
I1123 08:43:58.002249 254114 system_pods.go:89] "kindnet-q8xnm" [c3178adf-8eb3-4210-9674-fdda89d3317d] Running
I1123 08:43:58.002253 254114 system_pods.go:89] "kube-apiserver-old-k8s-version-204346" [e63e828c-37a0-48ab-9413-932b3cde09cc] Running
I1123 08:43:58.002257 254114 system_pods.go:89] "kube-controller-manager-old-k8s-version-204346" [bbaefdad-f8f3-4264-a467-5f75937de2a0] Running
I1123 08:43:58.002261 254114 system_pods.go:89] "kube-proxy-tzq9b" [5d122719-2577-438f-bae7-72a1034f88ef] Running
I1123 08:43:58.002264 254114 system_pods.go:89] "kube-scheduler-old-k8s-version-204346" [773bcc91-2553-4606-91ab-f32ec0ba3738] Running
I1123 08:43:58.002267 254114 system_pods.go:89] "storage-provisioner" [372382d8-d23f-4e6d-89ae-8f2c9c46b6dc] Running
I1123 08:43:58.002275 254114 system_pods.go:126] duration metric: took 686.59398ms to wait for k8s-apps to be running ...
I1123 08:43:58.002285 254114 system_svc.go:44] waiting for kubelet service to be running ....
I1123 08:43:58.002331 254114 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 08:43:58.016798 254114 system_svc.go:56] duration metric: took 14.504815ms WaitForService to wait for kubelet
I1123 08:43:58.016829 254114 kubeadm.go:587] duration metric: took 15.688298138s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:43:58.016854 254114 node_conditions.go:102] verifying NodePressure condition ...
I1123 08:43:58.021952 254114 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I1123 08:43:58.021983 254114 node_conditions.go:123] node cpu capacity is 8
I1123 08:43:58.022010 254114 node_conditions.go:105] duration metric: took 5.146561ms to run NodePressure ...
I1123 08:43:58.022026 254114 start.go:242] waiting for startup goroutines ...
I1123 08:43:58.022040 254114 start.go:247] waiting for cluster config update ...
I1123 08:43:58.022056 254114 start.go:256] writing updated cluster config ...
I1123 08:43:58.022354 254114 ssh_runner.go:195] Run: rm -f paused
I1123 08:43:58.026482 254114 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:43:58.030783 254114 pod_ready.go:83] waiting for pod "coredns-5dd5756b68-2fdsv" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.035326 254114 pod_ready.go:94] pod "coredns-5dd5756b68-2fdsv" is "Ready"
I1123 08:43:58.035351 254114 pod_ready.go:86] duration metric: took 4.542747ms for pod "coredns-5dd5756b68-2fdsv" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.038155 254114 pod_ready.go:83] waiting for pod "etcd-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.042389 254114 pod_ready.go:94] pod "etcd-old-k8s-version-204346" is "Ready"
I1123 08:43:58.042413 254114 pod_ready.go:86] duration metric: took 4.236026ms for pod "etcd-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.045530 254114 pod_ready.go:83] waiting for pod "kube-apiserver-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.049686 254114 pod_ready.go:94] pod "kube-apiserver-old-k8s-version-204346" is "Ready"
I1123 08:43:58.049708 254114 pod_ready.go:86] duration metric: took 4.151976ms for pod "kube-apiserver-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.052167 254114 pod_ready.go:83] waiting for pod "kube-controller-manager-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.430619 254114 pod_ready.go:94] pod "kube-controller-manager-old-k8s-version-204346" is "Ready"
I1123 08:43:58.430662 254114 pod_ready.go:86] duration metric: took 378.478321ms for pod "kube-controller-manager-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:58.631434 254114 pod_ready.go:83] waiting for pod "kube-proxy-tzq9b" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:59.030458 254114 pod_ready.go:94] pod "kube-proxy-tzq9b" is "Ready"
I1123 08:43:59.030484 254114 pod_ready.go:86] duration metric: took 399.024693ms for pod "kube-proxy-tzq9b" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:59.231371 254114 pod_ready.go:83] waiting for pod "kube-scheduler-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:59.630789 254114 pod_ready.go:94] pod "kube-scheduler-old-k8s-version-204346" is "Ready"
I1123 08:43:59.630824 254114 pod_ready.go:86] duration metric: took 399.424476ms for pod "kube-scheduler-old-k8s-version-204346" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:43:59.630840 254114 pod_ready.go:40] duration metric: took 1.604329749s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:43:59.682106 254114 start.go:625] kubectl: 1.34.2, cluster: 1.28.0 (minor skew: 6)
I1123 08:43:59.683780 254114 out.go:203]
W1123 08:43:59.685129 254114 out.go:285] ! /usr/local/bin/kubectl is version 1.34.2, which may have incompatibilities with Kubernetes 1.28.0.
I1123 08:43:59.686407 254114 out.go:179] - Want kubectl v1.28.0? Try 'minikube kubectl -- get pods -A'
I1123 08:43:59.689781 254114 out.go:179] * Done! kubectl is now configured to use "old-k8s-version-204346" cluster and "default" namespace by default
I1123 08:43:59.237742 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:43:59.238210 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:43:59.238271 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:43:59.238328 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:43:59.266168 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:59.266191 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:59.266197 206485 cri.go:89] found id: ""
I1123 08:43:59.266205 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:43:59.266261 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.270518 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.274380 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:43:59.274439 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:43:59.301514 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:59.301542 206485 cri.go:89] found id: ""
I1123 08:43:59.301552 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:43:59.301612 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.305940 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:43:59.306010 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:43:59.332361 206485 cri.go:89] found id: ""
I1123 08:43:59.332384 206485 logs.go:282] 0 containers: []
W1123 08:43:59.332394 206485 logs.go:284] No container was found matching "coredns"
I1123 08:43:59.332402 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:43:59.332453 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:43:59.360415 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:59.360515 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:59.360533 206485 cri.go:89] found id: ""
I1123 08:43:59.360541 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:43:59.360600 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.364967 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.369350 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:43:59.369411 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:43:59.400932 206485 cri.go:89] found id: ""
I1123 08:43:59.400960 206485 logs.go:282] 0 containers: []
W1123 08:43:59.400971 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:43:59.400979 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:43:59.401039 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:43:59.426988 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:59.427009 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:59.427013 206485 cri.go:89] found id: ""
I1123 08:43:59.427019 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:43:59.427065 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.431308 206485 ssh_runner.go:195] Run: which crictl
I1123 08:43:59.435139 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:43:59.435187 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:43:59.461062 206485 cri.go:89] found id: ""
I1123 08:43:59.461089 206485 logs.go:282] 0 containers: []
W1123 08:43:59.461098 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:43:59.461106 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:43:59.461156 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:43:59.487437 206485 cri.go:89] found id: ""
I1123 08:43:59.487458 206485 logs.go:282] 0 containers: []
W1123 08:43:59.487467 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:43:59.487476 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:43:59.487487 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:43:59.520087 206485 logs.go:123] Gathering logs for container status ...
I1123 08:43:59.520115 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:43:59.551620 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:43:59.551662 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:43:59.610836 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:43:59.610857 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:43:59.610875 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:43:59.647413 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:43:59.647458 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:43:59.686992 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:43:59.687024 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:43:59.724084 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:43:59.724115 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:43:59.760830 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:43:59.760916 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:43:59.811485 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:43:59.811519 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:43:59.920592 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:43:59.920624 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:43:59.937635 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:43:59.937681 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:43:59.974909 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:43:59.974948 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:43:57.830451 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:58.330875 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:58.830628 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:59.330282 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:43:59.830162 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:00.330422 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:00.830950 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:01.330805 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:01.830841 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:02.330880 258086 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:44:02.414724 258086 kubeadm.go:1114] duration metric: took 5.161257652s to wait for elevateKubeSystemPrivileges
I1123 08:44:02.414756 258086 kubeadm.go:403] duration metric: took 15.737896165s to StartCluster
I1123 08:44:02.414776 258086 settings.go:142] acquiring lock: {Name:mk2c00a8b461754a49d5c7fd5af34c7d1005153a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:44:02.414842 258086 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21969-13876/kubeconfig
I1123 08:44:02.416821 258086 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21969-13876/kubeconfig: {Name:mk636046b7146fd65b5638a6d549b76e61f7f055 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:44:02.417741 258086 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I1123 08:44:02.417762 258086 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1123 08:44:02.417786 258086 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1123 08:44:02.417889 258086 addons.go:70] Setting storage-provisioner=true in profile "no-preload-999106"
I1123 08:44:02.417910 258086 addons.go:239] Setting addon storage-provisioner=true in "no-preload-999106"
I1123 08:44:02.417926 258086 addons.go:70] Setting default-storageclass=true in profile "no-preload-999106"
I1123 08:44:02.417947 258086 config.go:182] Loaded profile config "no-preload-999106": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.34.1
I1123 08:44:02.417950 258086 host.go:66] Checking if "no-preload-999106" exists ...
I1123 08:44:02.417952 258086 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-999106"
I1123 08:44:02.418452 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:44:02.418590 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:44:02.419817 258086 out.go:179] * Verifying Kubernetes components...
I1123 08:44:02.422556 258086 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:44:02.448285 258086 addons.go:239] Setting addon default-storageclass=true in "no-preload-999106"
I1123 08:44:02.448336 258086 host.go:66] Checking if "no-preload-999106" exists ...
I1123 08:44:02.448496 258086 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:44:02.448879 258086 cli_runner.go:164] Run: docker container inspect no-preload-999106 --format={{.State.Status}}
I1123 08:44:02.449866 258086 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:44:02.449888 258086 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1123 08:44:02.449940 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:44:02.479849 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:44:02.481186 258086 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1123 08:44:02.481210 258086 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1123 08:44:02.481267 258086 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" no-preload-999106
I1123 08:44:02.506758 258086 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33063 SSHKeyPath:/home/jenkins/minikube-integration/21969-13876/.minikube/machines/no-preload-999106/id_rsa Username:docker}
I1123 08:44:02.518200 258086 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.85.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1123 08:44:02.581982 258086 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:44:02.612639 258086 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:44:02.629441 258086 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1123 08:44:02.722551 258086 start.go:977] {"host.minikube.internal": 192.168.85.1} host record injected into CoreDNS's ConfigMap
I1123 08:44:02.724186 258086 node_ready.go:35] waiting up to 6m0s for node "no-preload-999106" to be "Ready" ...
I1123 08:44:02.952603 258086 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1123 08:44:02.531044 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:44:02.531451 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:44:02.531515 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:44:02.531572 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:44:02.568683 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:44:02.568716 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:44:02.568723 206485 cri.go:89] found id: ""
I1123 08:44:02.568732 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:44:02.568799 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.573171 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.577424 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:44:02.577582 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:44:02.618894 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:44:02.618923 206485 cri.go:89] found id: ""
I1123 08:44:02.618932 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:44:02.618987 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.624397 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:44:02.624456 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:44:02.659100 206485 cri.go:89] found id: ""
I1123 08:44:02.659131 206485 logs.go:282] 0 containers: []
W1123 08:44:02.659143 206485 logs.go:284] No container was found matching "coredns"
I1123 08:44:02.659151 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:44:02.659213 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:44:02.694829 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:44:02.694848 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:44:02.694852 206485 cri.go:89] found id: ""
I1123 08:44:02.694859 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:44:02.694907 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.700604 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.705763 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:44:02.705843 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:44:02.741480 206485 cri.go:89] found id: ""
I1123 08:44:02.741510 206485 logs.go:282] 0 containers: []
W1123 08:44:02.741523 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:44:02.741529 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:44:02.741595 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:44:02.778417 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:44:02.778442 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:44:02.778448 206485 cri.go:89] found id: ""
I1123 08:44:02.778456 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:44:02.778518 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.784422 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:02.789717 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:44:02.789794 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:44:02.821165 206485 cri.go:89] found id: ""
I1123 08:44:02.821194 206485 logs.go:282] 0 containers: []
W1123 08:44:02.821205 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:44:02.821216 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:44:02.821271 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:44:02.852719 206485 cri.go:89] found id: ""
I1123 08:44:02.852745 206485 logs.go:282] 0 containers: []
W1123 08:44:02.852754 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:44:02.852766 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:44:02.852785 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:44:02.892590 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:44:02.892629 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:44:02.926138 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:44:02.926174 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:44:02.962943 206485 logs.go:123] Gathering logs for container status ...
I1123 08:44:02.962982 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:44:02.999133 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:44:02.999165 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:44:03.103866 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:44:03.103901 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:44:03.118230 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:44:03.118258 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:44:03.152826 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:44:03.152853 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:44:03.207774 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:44:03.207809 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:44:03.255093 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:44:03.255135 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:44:03.316127 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:44:03.316156 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:44:03.316171 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:44:03.350816 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:44:03.350855 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:44:05.885724 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:44:05.886146 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:44:05.886208 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:44:05.886271 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:44:05.912631 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:44:05.912667 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:44:05.912672 206485 cri.go:89] found id: ""
I1123 08:44:05.912681 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:44:05.912736 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:05.916915 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:05.920714 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:44:05.920785 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:44:05.948197 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:44:05.948226 206485 cri.go:89] found id: ""
I1123 08:44:05.948237 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:44:05.948297 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:05.952344 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:44:05.952394 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:44:05.979281 206485 cri.go:89] found id: ""
I1123 08:44:05.979302 206485 logs.go:282] 0 containers: []
W1123 08:44:05.979309 206485 logs.go:284] No container was found matching "coredns"
I1123 08:44:05.979315 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:44:05.979360 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:44:06.005748 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:44:06.005775 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:44:06.005781 206485 cri.go:89] found id: ""
I1123 08:44:06.005790 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:44:06.005842 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:06.009813 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:06.013567 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:44:06.013631 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:44:06.040041 206485 cri.go:89] found id: ""
I1123 08:44:06.040069 206485 logs.go:282] 0 containers: []
W1123 08:44:06.040082 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:44:06.040090 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:44:06.040146 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:44:06.068400 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:44:06.068423 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:44:06.068428 206485 cri.go:89] found id: ""
I1123 08:44:06.068435 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:44:06.068489 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:06.072472 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:06.076295 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:44:06.076354 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:44:06.102497 206485 cri.go:89] found id: ""
I1123 08:44:06.102525 206485 logs.go:282] 0 containers: []
W1123 08:44:06.102538 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:44:06.102546 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:44:06.102607 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:44:06.130104 206485 cri.go:89] found id: ""
I1123 08:44:06.130125 206485 logs.go:282] 0 containers: []
W1123 08:44:06.130132 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:44:06.130141 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:44:06.130150 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:44:06.219429 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:44:06.219465 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:44:06.278463 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:44:06.278491 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:44:06.278507 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:44:06.315308 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:44:06.315344 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:44:06.374595 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:44:06.374627 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:44:06.404338 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:44:06.404365 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:44:06.453101 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:44:06.453130 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:44:06.466457 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:44:06.466503 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:44:06.499235 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:44:06.499264 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:44:06.531782 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:44:06.531811 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:44:06.567190 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:44:06.567225 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:44:06.595596 206485 logs.go:123] Gathering logs for container status ...
I1123 08:44:06.595626 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1123 08:44:02.953927 258086 addons.go:530] duration metric: took 536.142427ms for enable addons: enabled=[storage-provisioner default-storageclass]
I1123 08:44:03.227564 258086 kapi.go:214] "coredns" deployment in "kube-system" namespace and "no-preload-999106" context rescaled to 1 replicas
W1123 08:44:04.727505 258086 node_ready.go:57] node "no-preload-999106" has "Ready":"False" status (will retry)
W1123 08:44:07.227319 258086 node_ready.go:57] node "no-preload-999106" has "Ready":"False" status (will retry)
I1123 08:44:09.129199 206485 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ...
I1123 08:44:09.129705 206485 api_server.go:269] stopped: https://192.168.76.2:8443/healthz: Get "https://192.168.76.2:8443/healthz": dial tcp 192.168.76.2:8443: connect: connection refused
I1123 08:44:09.129766 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-apiserver Namespaces:[]}
I1123 08:44:09.129825 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1123 08:44:09.156517 206485 cri.go:89] found id: "630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:44:09.156541 206485 cri.go:89] found id: "fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:44:09.156546 206485 cri.go:89] found id: ""
I1123 08:44:09.156553 206485 logs.go:282] 2 containers: [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3]
I1123 08:44:09.156609 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:09.160731 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:09.164606 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:etcd Namespaces:[]}
I1123 08:44:09.164701 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1123 08:44:09.190968 206485 cri.go:89] found id: "044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:44:09.190989 206485 cri.go:89] found id: ""
I1123 08:44:09.190998 206485 logs.go:282] 1 containers: [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1]
I1123 08:44:09.191055 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:09.195105 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:coredns Namespaces:[]}
I1123 08:44:09.195172 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1123 08:44:09.222111 206485 cri.go:89] found id: ""
I1123 08:44:09.222135 206485 logs.go:282] 0 containers: []
W1123 08:44:09.222143 206485 logs.go:284] No container was found matching "coredns"
I1123 08:44:09.222150 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-scheduler Namespaces:[]}
I1123 08:44:09.222208 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1123 08:44:09.249482 206485 cri.go:89] found id: "1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:44:09.249504 206485 cri.go:89] found id: "c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:44:09.249508 206485 cri.go:89] found id: ""
I1123 08:44:09.249514 206485 logs.go:282] 2 containers: [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9]
I1123 08:44:09.249571 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:09.253482 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:09.257347 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-proxy Namespaces:[]}
I1123 08:44:09.257412 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1123 08:44:09.284419 206485 cri.go:89] found id: ""
I1123 08:44:09.284442 206485 logs.go:282] 0 containers: []
W1123 08:44:09.284455 206485 logs.go:284] No container was found matching "kube-proxy"
I1123 08:44:09.284463 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kube-controller-manager Namespaces:[]}
I1123 08:44:09.284516 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1123 08:44:09.310860 206485 cri.go:89] found id: "5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:44:09.310887 206485 cri.go:89] found id: "a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:44:09.310893 206485 cri.go:89] found id: ""
I1123 08:44:09.310902 206485 logs.go:282] 2 containers: [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e]
I1123 08:44:09.310958 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:09.315221 206485 ssh_runner.go:195] Run: which crictl
I1123 08:44:09.319027 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:kindnet Namespaces:[]}
I1123 08:44:09.319091 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1123 08:44:09.345532 206485 cri.go:89] found id: ""
I1123 08:44:09.345557 206485 logs.go:282] 0 containers: []
W1123 08:44:09.345568 206485 logs.go:284] No container was found matching "kindnet"
I1123 08:44:09.345575 206485 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:all Name:storage-provisioner Namespaces:[]}
I1123 08:44:09.345656 206485 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=storage-provisioner
I1123 08:44:09.373433 206485 cri.go:89] found id: ""
I1123 08:44:09.373457 206485 logs.go:282] 0 containers: []
W1123 08:44:09.373467 206485 logs.go:284] No container was found matching "storage-provisioner"
I1123 08:44:09.373478 206485 logs.go:123] Gathering logs for dmesg ...
I1123 08:44:09.373511 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1123 08:44:09.388342 206485 logs.go:123] Gathering logs for describe nodes ...
I1123 08:44:09.388377 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W1123 08:44:09.446418 206485 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I1123 08:44:09.446441 206485 logs.go:123] Gathering logs for kube-apiserver [630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391] ...
I1123 08:44:09.446457 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 630b64b5be0cb4dca7e76e587870658d617f546c982e0093315a0a29f8601391"
I1123 08:44:09.480003 206485 logs.go:123] Gathering logs for kube-apiserver [fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3] ...
I1123 08:44:09.480036 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 fb8518f573158d97f53fb245d32078fea9adfe77d427a893fc59a99e978ebdb3"
I1123 08:44:09.520856 206485 logs.go:123] Gathering logs for kube-scheduler [1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a] ...
I1123 08:44:09.520887 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 1cf7292c277af6cb045477d977b4eb3ac8a26073812db340c39829184d070d7a"
I1123 08:44:09.580293 206485 logs.go:123] Gathering logs for kube-scheduler [c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9] ...
I1123 08:44:09.580334 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 c82c9e1d93a5e6d0c97d2a50653e0a0e24a7d09dd9bc31f38c76b1e52ebb35f9"
I1123 08:44:09.614373 206485 logs.go:123] Gathering logs for kube-controller-manager [5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b] ...
I1123 08:44:09.614404 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 5b31aa0ae9e6f4796c821d96da5ffc1e20bc83e10fbbf63bd1b9716b861bd26b"
I1123 08:44:09.643177 206485 logs.go:123] Gathering logs for kube-controller-manager [a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e] ...
I1123 08:44:09.643204 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 a43b20b4a15ff89af8a76d1f03bfab0a98debb934612c0de9daeabb46141d54e"
I1123 08:44:09.676566 206485 logs.go:123] Gathering logs for kubelet ...
I1123 08:44:09.676593 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1123 08:44:09.771524 206485 logs.go:123] Gathering logs for etcd [044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1] ...
I1123 08:44:09.771560 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/local/bin/crictl logs --tail 400 044e753c77fd5ac122f63a6399605382f8ab9de0635c6673d96f00897dd6e4e1"
I1123 08:44:09.803272 206485 logs.go:123] Gathering logs for containerd ...
I1123 08:44:09.803301 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u containerd -n 400"
I1123 08:44:09.851726 206485 logs.go:123] Gathering logs for container status ...
I1123 08:44:09.851765 206485 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
1357388ae0aa5 56cc512116c8f 10 seconds ago Running busybox 0 34632f38cdf63 busybox default
80475d9bc2771 ead0a4a53df89 15 seconds ago Running coredns 0 cd75a3dc79d90 coredns-5dd5756b68-2fdsv kube-system
089b66b211cc0 6e38f40d628db 15 seconds ago Running storage-provisioner 0 8489f4374b9ca storage-provisioner kube-system
39b3d72b0119b 409467f978b4a 26 seconds ago Running kindnet-cni 0 4e7fe0b0a93a6 kindnet-q8xnm kube-system
ef4e4389e44ca ea1030da44aa1 29 seconds ago Running kube-proxy 0 5b9d69d308423 kube-proxy-tzq9b kube-system
0ef7f303a2ce3 f6f496300a2ae 47 seconds ago Running kube-scheduler 0 2757f6f1f2847 kube-scheduler-old-k8s-version-204346 kube-system
8f2985624466e 4be79c38a4bab 47 seconds ago Running kube-controller-manager 0 7d13da4692cf0 kube-controller-manager-old-k8s-version-204346 kube-system
328d012e2a9c6 bb5e0dde9054c 47 seconds ago Running kube-apiserver 0 801b406a053e0 kube-apiserver-old-k8s-version-204346 kube-system
09bd2ad51bcbe 73deb9a3f7025 47 seconds ago Running etcd 0 bd3a3ff71b569 etcd-old-k8s-version-204346 kube-system
==> containerd <==
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.554367695Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-5dd5756b68-2fdsv,Uid:1c71e052-b3c2-4875-8aeb-7d724ee26e06,Namespace:kube-system,Attempt:0,} returns sandbox id \"cd75a3dc79d9055a439d60e0b8c3a0eaf0c09774664074c042478ddbd42d8ed7\""
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.557881691Z" level=info msg="CreateContainer within sandbox \"cd75a3dc79d9055a439d60e0b8c3a0eaf0c09774664074c042478ddbd42d8ed7\" for container &ContainerMetadata{Name:coredns,Attempt:0,}"
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.565420837Z" level=info msg="Container 80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a: CDI devices from CRI Config.CDIDevices: []"
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.572367270Z" level=info msg="CreateContainer within sandbox \"cd75a3dc79d9055a439d60e0b8c3a0eaf0c09774664074c042478ddbd42d8ed7\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a\""
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.573105266Z" level=info msg="StartContainer for \"80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a\""
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.573985605Z" level=info msg="connecting to shim 80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a" address="unix:///run/containerd/s/402875f21b0b7b033dcd7b3cca8f2720835d3f90418b17dd5f3df52485b09e0c" protocol=ttrpc version=3
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.602588352Z" level=info msg="StartContainer for \"089b66b211cc086767c9fdf40aba06bcf7b4484c0976381a4bdf51afe2621f61\" returns successfully"
Nov 23 08:43:57 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:43:57.630751490Z" level=info msg="StartContainer for \"80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a\" returns successfully"
Nov 23 08:44:00 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:00.171495043Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:85a1fcd5-ee10-4749-9dec-40efed82eb3e,Namespace:default,Attempt:0,}"
Nov 23 08:44:00 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:00.210794452Z" level=info msg="connecting to shim 34632f38cdf63a655e8bb7d39dd15ba97b0a7a53c3d2190fc06701fde9c49996" address="unix:///run/containerd/s/9131634b5b9e099a09d55b33b67bba908aad637f11b87abf7ed2211b15f763a9" namespace=k8s.io protocol=ttrpc version=3
Nov 23 08:44:00 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:00.287286149Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:busybox,Uid:85a1fcd5-ee10-4749-9dec-40efed82eb3e,Namespace:default,Attempt:0,} returns sandbox id \"34632f38cdf63a655e8bb7d39dd15ba97b0a7a53c3d2190fc06701fde9c49996\""
Nov 23 08:44:00 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:00.289225870Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\""
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.394106458Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.394929355Z" level=info msg="stop pulling image gcr.io/k8s-minikube/busybox:1.28.4-glibc: active requests=0, bytes read=2396643"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.396449964Z" level=info msg="ImageCreate event name:\"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.399611876Z" level=info msg="ImageCreate event name:\"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.400256412Z" level=info msg="Pulled image \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" with image id \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\", repo tag \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\", repo digest \"gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e\", size \"2395207\" in 2.110984688s"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.400309785Z" level=info msg="PullImage \"gcr.io/k8s-minikube/busybox:1.28.4-glibc\" returns image reference \"sha256:56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c\""
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.402701592Z" level=info msg="CreateContainer within sandbox \"34632f38cdf63a655e8bb7d39dd15ba97b0a7a53c3d2190fc06701fde9c49996\" for container &ContainerMetadata{Name:busybox,Attempt:0,}"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.410744826Z" level=info msg="Container 1357388ae0aa594dabe5692b9f6c39afa871a26d6dd0b5809e1510839a986dd5: CDI devices from CRI Config.CDIDevices: []"
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.419870192Z" level=info msg="CreateContainer within sandbox \"34632f38cdf63a655e8bb7d39dd15ba97b0a7a53c3d2190fc06701fde9c49996\" for &ContainerMetadata{Name:busybox,Attempt:0,} returns container id \"1357388ae0aa594dabe5692b9f6c39afa871a26d6dd0b5809e1510839a986dd5\""
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.421053047Z" level=info msg="StartContainer for \"1357388ae0aa594dabe5692b9f6c39afa871a26d6dd0b5809e1510839a986dd5\""
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.422071051Z" level=info msg="connecting to shim 1357388ae0aa594dabe5692b9f6c39afa871a26d6dd0b5809e1510839a986dd5" address="unix:///run/containerd/s/9131634b5b9e099a09d55b33b67bba908aad637f11b87abf7ed2211b15f763a9" protocol=ttrpc version=3
Nov 23 08:44:02 old-k8s-version-204346 containerd[661]: time="2025-11-23T08:44:02.495260690Z" level=info msg="StartContainer for \"1357388ae0aa594dabe5692b9f6c39afa871a26d6dd0b5809e1510839a986dd5\" returns successfully"
Nov 23 08:44:09 old-k8s-version-204346 containerd[661]: E1123 08:44:09.948064 661 websocket.go:100] "Unhandled Error" err="unable to upgrade websocket connection: websocket server finished before becoming ready" logger="UnhandledError"
==> coredns [80475d9bc2771a5b76c88ec3e691c3e9e026b5054aa1bbf27b0fd3499a79fd1a] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 25cf5af2951e282c4b0e961a02fb5d3e57c974501832fee92eec17b5135b9ec9d9e87d2ac94e6d117a5ed3dd54e8800aa7b4479706eb54497145ccdb80397d1b
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:38064 - 25011 "HINFO IN 3150570816276822377.3169321318277058455. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.024835318s
==> describe nodes <==
Name: old-k8s-version-204346
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=old-k8s-version-204346
kubernetes.io/os=linux
minikube.k8s.io/commit=50c3a8a3c03e8a84b6c978a884d21c3de8c6d4f1
minikube.k8s.io/name=old-k8s-version-204346
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_23T08_43_30_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 23 Nov 2025 08:43:26 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: old-k8s-version-204346
AcquireTime: <unset>
RenewTime: Sun, 23 Nov 2025 08:44:10 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sun, 23 Nov 2025 08:44:00 +0000 Sun, 23 Nov 2025 08:43:25 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sun, 23 Nov 2025 08:44:00 +0000 Sun, 23 Nov 2025 08:43:25 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sun, 23 Nov 2025 08:44:00 +0000 Sun, 23 Nov 2025 08:43:25 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sun, 23 Nov 2025 08:44:00 +0000 Sun, 23 Nov 2025 08:43:57 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.103.2
Hostname: old-k8s-version-204346
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863352Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863352Ki
pods: 110
System Info:
Machine ID: 9629f1d5bc1ed524a56ce23c69214c09
System UUID: ddf0e41b-1230-4041-b2b0-aca7ba0a6fe4
Boot ID: 3bab2277-1db4-4284-9fcc-5d1d58e87eb4
Kernel Version: 6.8.0-1044-gcp
OS Image: Debian GNU/Linux 12 (bookworm)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://2.1.5
Kubelet Version: v1.28.0
Kube-Proxy Version: v1.28.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (9 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 14s
kube-system coredns-5dd5756b68-2fdsv 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 31s
kube-system etcd-old-k8s-version-204346 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 44s
kube-system kindnet-q8xnm 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 31s
kube-system kube-apiserver-old-k8s-version-204346 250m (3%) 0 (0%) 0 (0%) 0 (0%) 44s
kube-system kube-controller-manager-old-k8s-version-204346 200m (2%) 0 (0%) 0 (0%) 0 (0%) 44s
kube-system kube-proxy-tzq9b 0 (0%) 0 (0%) 0 (0%) 0 (0%) 31s
kube-system kube-scheduler-old-k8s-version-204346 100m (1%) 0 (0%) 0 (0%) 0 (0%) 44s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 30s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (10%) 100m (1%)
memory 220Mi (0%) 220Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 29s kube-proxy
Normal Starting 49s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 49s (x8 over 49s) kubelet Node old-k8s-version-204346 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 49s (x8 over 49s) kubelet Node old-k8s-version-204346 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 49s (x7 over 49s) kubelet Node old-k8s-version-204346 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 49s kubelet Updated Node Allocatable limit across pods
Normal Starting 44s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 44s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 44s kubelet Node old-k8s-version-204346 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 44s kubelet Node old-k8s-version-204346 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 44s kubelet Node old-k8s-version-204346 status is now: NodeHasSufficientPID
Normal RegisteredNode 31s node-controller Node old-k8s-version-204346 event: Registered Node old-k8s-version-204346 in Controller
Normal NodeReady 16s kubelet Node old-k8s-version-204346 status is now: NodeReady
==> dmesg <==
[Nov23 07:17] MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.
[ +0.000998] TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.
[ +0.001000] MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.
[ +0.084011] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.395963] i8042: Warning: Keylock active
[ +0.012075] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.497035] block sda: the capability attribute has been deprecated.
[ +0.088048] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.022581] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +5.308229] kauditd_printk_skb: 47 callbacks suppressed
==> etcd [09bd2ad51bcbe3133715a0348c39fbd488688f92fdc757fef7b242366c6eb72b] <==
{"level":"info","ts":"2025-11-23T08:43:25.072307Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 switched to configuration voters=(17451554867067011209)"}
{"level":"info","ts":"2025-11-23T08:43:25.072449Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"3336683c081d149d","local-member-id":"f23060b075c4c089","added-peer-id":"f23060b075c4c089","added-peer-peer-urls":["https://192.168.103.2:2380"]}
{"level":"info","ts":"2025-11-23T08:43:25.073769Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-11-23T08:43:25.074175Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"f23060b075c4c089","initial-advertise-peer-urls":["https://192.168.103.2:2380"],"listen-peer-urls":["https://192.168.103.2:2380"],"advertise-client-urls":["https://192.168.103.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.103.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-11-23T08:43:25.073803Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.103.2:2380"}
{"level":"info","ts":"2025-11-23T08:43:25.074517Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.103.2:2380"}
{"level":"info","ts":"2025-11-23T08:43:25.074362Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-11-23T08:43:25.459144Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 is starting a new election at term 1"}
{"level":"info","ts":"2025-11-23T08:43:25.459188Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 became pre-candidate at term 1"}
{"level":"info","ts":"2025-11-23T08:43:25.459233Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 received MsgPreVoteResp from f23060b075c4c089 at term 1"}
{"level":"info","ts":"2025-11-23T08:43:25.459253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 became candidate at term 2"}
{"level":"info","ts":"2025-11-23T08:43:25.459261Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 received MsgVoteResp from f23060b075c4c089 at term 2"}
{"level":"info","ts":"2025-11-23T08:43:25.459281Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"f23060b075c4c089 became leader at term 2"}
{"level":"info","ts":"2025-11-23T08:43:25.459298Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: f23060b075c4c089 elected leader f23060b075c4c089 at term 2"}
{"level":"info","ts":"2025-11-23T08:43:25.460336Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"f23060b075c4c089","local-member-attributes":"{Name:old-k8s-version-204346 ClientURLs:[https://192.168.103.2:2379]}","request-path":"/0/members/f23060b075c4c089/attributes","cluster-id":"3336683c081d149d","publish-timeout":"7s"}
{"level":"info","ts":"2025-11-23T08:43:25.460368Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-23T08:43:25.460352Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:43:25.460547Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-11-23T08:43:25.46207Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-11-23T08:43:25.460343Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-11-23T08:43:25.46151Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"3336683c081d149d","local-member-id":"f23060b075c4c089","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:43:25.462309Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:43:25.462347Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2025-11-23T08:43:25.461945Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-11-23T08:43:25.466791Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.103.2:2379"}
==> kernel <==
08:44:13 up 1:26, 0 user, load average: 2.68, 2.53, 1.78
Linux old-k8s-version-204346 6.8.0-1044-gcp #47~22.04.1-Ubuntu SMP Thu Oct 23 21:07:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kindnet [39b3d72b0119bcc6ecd6e57b170ea19f5592bba7f48f0436c996349c8ca348dd] <==
I1123 08:43:46.866967 1 main.go:109] connected to apiserver: https://10.96.0.1:443
I1123 08:43:46.867287 1 main.go:139] hostIP = 192.168.103.2
podIP = 192.168.103.2
I1123 08:43:46.867434 1 main.go:148] setting mtu 1500 for CNI
I1123 08:43:46.867454 1 main.go:178] kindnetd IP family: "ipv4"
I1123 08:43:46.867482 1 main.go:182] noMask IPv4 subnets: [10.244.0.0/16]
time="2025-11-23T08:43:47Z" level=info msg="Created plugin 10-kube-network-policies (kindnetd, handles RunPodSandbox,RemovePodSandbox)"
I1123 08:43:47.067711 1 controller.go:377] "Starting controller" name="kube-network-policies"
I1123 08:43:47.067748 1 controller.go:381] "Waiting for informer caches to sync"
I1123 08:43:47.067760 1 shared_informer.go:350] "Waiting for caches to sync" controller="kube-network-policies"
I1123 08:43:47.067904 1 controller.go:390] nri plugin exited: failed to connect to NRI service: dial unix /var/run/nri/nri.sock: connect: no such file or directory
I1123 08:43:47.369355 1 shared_informer.go:357] "Caches are synced" controller="kube-network-policies"
I1123 08:43:47.369384 1 metrics.go:72] Registering metrics
I1123 08:43:47.369441 1 controller.go:711] "Syncing nftables rules"
I1123 08:43:57.076844 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1123 08:43:57.076915 1 main.go:301] handling current node
I1123 08:44:07.068039 1 main.go:297] Handling node with IPs: map[192.168.103.2:{}]
I1123 08:44:07.068093 1 main.go:301] handling current node
==> kube-apiserver [328d012e2a9c60b89bce2737c3bcb6c1f31581c21f2a3f2969cf002ad66bc982] <==
I1123 08:43:26.887380 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1123 08:43:26.887389 1 cache.go:39] Caches are synced for autoregister controller
I1123 08:43:26.887641 1 apf_controller.go:377] Running API Priority and Fairness config worker
I1123 08:43:26.887685 1 apf_controller.go:380] Running API Priority and Fairness periodic rebalancing process
I1123 08:43:26.887980 1 shared_informer.go:318] Caches are synced for configmaps
I1123 08:43:26.888304 1 shared_informer.go:318] Caches are synced for cluster_authentication_trust_controller
E1123 08:43:26.889201 1 controller.go:146] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms"
I1123 08:43:26.889373 1 controller.go:624] quota admission added evaluator for: namespaces
I1123 08:43:26.893730 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1123 08:43:27.092344 1 controller.go:624] quota admission added evaluator for: leases.coordination.k8s.io
I1123 08:43:27.794220 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I1123 08:43:27.798285 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I1123 08:43:27.798301 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I1123 08:43:28.278123 1 controller.go:624] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1123 08:43:28.347605 1 controller.go:624] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1123 08:43:28.396516 1 alloc.go:330] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W1123 08:43:28.402119 1 lease.go:263] Resetting endpoints for master service "kubernetes" to [192.168.103.2]
I1123 08:43:28.403251 1 controller.go:624] quota admission added evaluator for: endpoints
I1123 08:43:28.410689 1 controller.go:624] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1123 08:43:28.846011 1 controller.go:624] quota admission added evaluator for: serviceaccounts
I1123 08:43:29.796332 1 controller.go:624] quota admission added evaluator for: deployments.apps
I1123 08:43:29.808173 1 alloc.go:330] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I1123 08:43:29.820075 1 controller.go:624] quota admission added evaluator for: daemonsets.apps
I1123 08:43:42.454084 1 controller.go:624] quota admission added evaluator for: controllerrevisions.apps
I1123 08:43:42.555727 1 controller.go:624] quota admission added evaluator for: replicasets.apps
==> kube-controller-manager [8f2985624466e7aea2ab0922f065c597c0bfd5950e9a7d9af9278d532ea162aa] <==
I1123 08:43:42.301940 1 shared_informer.go:318] Caches are synced for resource quota
I1123 08:43:42.313117 1 shared_informer.go:318] Caches are synced for endpoint
I1123 08:43:42.320707 1 shared_informer.go:318] Caches are synced for resource quota
I1123 08:43:42.468731 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-tzq9b"
I1123 08:43:42.470032 1 event.go:307] "Event occurred" object="kube-system/kindnet" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kindnet-q8xnm"
I1123 08:43:42.562465 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5dd5756b68 to 2"
I1123 08:43:42.637391 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 08:43:42.693556 1 shared_informer.go:318] Caches are synced for garbage collector
I1123 08:43:42.693596 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage"
I1123 08:43:42.710317 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-j49bt"
I1123 08:43:42.720116 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5dd5756b68-2fdsv"
I1123 08:43:42.729591 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="167.450584ms"
I1123 08:43:42.750029 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="20.070236ms"
I1123 08:43:42.772635 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="22.530968ms"
I1123 08:43:42.772808 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="87.9µs"
I1123 08:43:42.817260 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled down replica set coredns-5dd5756b68 to 1 from 2"
I1123 08:43:42.828181 1 event.go:307] "Event occurred" object="kube-system/coredns-5dd5756b68" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulDelete" message="Deleted pod: coredns-5dd5756b68-j49bt"
I1123 08:43:42.834660 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="17.534321ms"
I1123 08:43:42.847353 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="12.631926ms"
I1123 08:43:42.847627 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="198.148µs"
I1123 08:43:57.121773 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="186.5µs"
I1123 08:43:57.150540 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="96.97µs"
I1123 08:43:57.197693 1 node_lifecycle_controller.go:1048] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
I1123 08:43:57.981361 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="7.07769ms"
I1123 08:43:57.981507 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="kube-system/coredns-5dd5756b68" duration="71.031µs"
==> kube-proxy [ef4e4389e44ca59002bc45aac4774894eff14408a6f6654c403f41a7f5ae9178] <==
I1123 08:43:43.138692 1 server_others.go:69] "Using iptables proxy"
I1123 08:43:43.148849 1 node.go:141] Successfully retrieved node IP: 192.168.103.2
I1123 08:43:43.173806 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1123 08:43:43.177107 1 server_others.go:152] "Using iptables Proxier"
I1123 08:43:43.177190 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I1123 08:43:43.177209 1 server_others.go:438] "Defaulting to no-op detect-local"
I1123 08:43:43.177247 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I1123 08:43:43.177554 1 server.go:846] "Version info" version="v1.28.0"
I1123 08:43:43.177673 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 08:43:43.178478 1 config.go:188] "Starting service config controller"
I1123 08:43:43.178510 1 shared_informer.go:311] Waiting for caches to sync for service config
I1123 08:43:43.179694 1 config.go:97] "Starting endpoint slice config controller"
I1123 08:43:43.179818 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I1123 08:43:43.180065 1 config.go:315] "Starting node config controller"
I1123 08:43:43.180084 1 shared_informer.go:311] Waiting for caches to sync for node config
I1123 08:43:43.280364 1 shared_informer.go:318] Caches are synced for service config
I1123 08:43:43.280485 1 shared_informer.go:318] Caches are synced for node config
I1123 08:43:43.280575 1 shared_informer.go:318] Caches are synced for endpoint slice config
==> kube-scheduler [0ef7f303a2ce364a193b1c3a534acf3ce3197306c4c2cc9dd0d5717ae9adf953] <==
W1123 08:43:26.854417 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1123 08:43:26.854437 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W1123 08:43:26.854443 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1123 08:43:26.854473 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1123 08:43:26.854661 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1123 08:43:26.854686 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W1123 08:43:26.854994 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1123 08:43:26.855027 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W1123 08:43:27.681328 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1123 08:43:27.681369 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W1123 08:43:27.807379 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1123 08:43:27.807413 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W1123 08:43:27.818838 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1123 08:43:27.818882 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W1123 08:43:27.819991 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1123 08:43:27.820027 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W1123 08:43:27.871687 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1123 08:43:27.871733 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W1123 08:43:27.919852 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1123 08:43:27.919895 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W1123 08:43:28.036804 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1123 08:43:28.036839 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W1123 08:43:28.055978 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1123 08:43:28.056016 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
I1123 08:43:29.649311 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.141354 1529 kuberuntime_manager.go:1463] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.142046 1529 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.476770 1529 topology_manager.go:215] "Topology Admit Handler" podUID="5d122719-2577-438f-bae7-72a1034f88ef" podNamespace="kube-system" podName="kube-proxy-tzq9b"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.478900 1529 topology_manager.go:215] "Topology Admit Handler" podUID="c3178adf-8eb3-4210-9674-fdda89d3317d" podNamespace="kube-system" podName="kindnet-q8xnm"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651490 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksdwp\" (UniqueName: \"kubernetes.io/projected/5d122719-2577-438f-bae7-72a1034f88ef-kube-api-access-ksdwp\") pod \"kube-proxy-tzq9b\" (UID: \"5d122719-2577-438f-bae7-72a1034f88ef\") " pod="kube-system/kube-proxy-tzq9b"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651698 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/c3178adf-8eb3-4210-9674-fdda89d3317d-lib-modules\") pod \"kindnet-q8xnm\" (UID: \"c3178adf-8eb3-4210-9674-fdda89d3317d\") " pod="kube-system/kindnet-q8xnm"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651862 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5d122719-2577-438f-bae7-72a1034f88ef-lib-modules\") pod \"kube-proxy-tzq9b\" (UID: \"5d122719-2577-438f-bae7-72a1034f88ef\") " pod="kube-system/kube-proxy-tzq9b"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651898 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/c3178adf-8eb3-4210-9674-fdda89d3317d-cni-cfg\") pod \"kindnet-q8xnm\" (UID: \"c3178adf-8eb3-4210-9674-fdda89d3317d\") " pod="kube-system/kindnet-q8xnm"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651928 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/c3178adf-8eb3-4210-9674-fdda89d3317d-xtables-lock\") pod \"kindnet-q8xnm\" (UID: \"c3178adf-8eb3-4210-9674-fdda89d3317d\") " pod="kube-system/kindnet-q8xnm"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651960 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9ntt\" (UniqueName: \"kubernetes.io/projected/c3178adf-8eb3-4210-9674-fdda89d3317d-kube-api-access-m9ntt\") pod \"kindnet-q8xnm\" (UID: \"c3178adf-8eb3-4210-9674-fdda89d3317d\") " pod="kube-system/kindnet-q8xnm"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.651992 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/5d122719-2577-438f-bae7-72a1034f88ef-kube-proxy\") pod \"kube-proxy-tzq9b\" (UID: \"5d122719-2577-438f-bae7-72a1034f88ef\") " pod="kube-system/kube-proxy-tzq9b"
Nov 23 08:43:42 old-k8s-version-204346 kubelet[1529]: I1123 08:43:42.652021 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/5d122719-2577-438f-bae7-72a1034f88ef-xtables-lock\") pod \"kube-proxy-tzq9b\" (UID: \"5d122719-2577-438f-bae7-72a1034f88ef\") " pod="kube-system/kube-proxy-tzq9b"
Nov 23 08:43:46 old-k8s-version-204346 kubelet[1529]: I1123 08:43:46.940830 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-tzq9b" podStartSLOduration=4.940768474 podCreationTimestamp="2025-11-23 08:43:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:43:43.932316562 +0000 UTC m=+14.168739010" watchObservedRunningTime="2025-11-23 08:43:46.940768474 +0000 UTC m=+17.177190922"
Nov 23 08:43:46 old-k8s-version-204346 kubelet[1529]: I1123 08:43:46.940988 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kindnet-q8xnm" podStartSLOduration=1.718157541 podCreationTimestamp="2025-11-23 08:43:42 +0000 UTC" firstStartedPulling="2025-11-23 08:43:43.30687244 +0000 UTC m=+13.543294877" lastFinishedPulling="2025-11-23 08:43:46.52967151 +0000 UTC m=+16.766093948" observedRunningTime="2025-11-23 08:43:46.940594815 +0000 UTC m=+17.177017264" watchObservedRunningTime="2025-11-23 08:43:46.940956612 +0000 UTC m=+17.177379059"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.093693 1529 kubelet_node_status.go:493] "Fast updating node status as it just became ready"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.122486 1529 topology_manager.go:215] "Topology Admit Handler" podUID="1c71e052-b3c2-4875-8aeb-7d724ee26e06" podNamespace="kube-system" podName="coredns-5dd5756b68-2fdsv"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.122759 1529 topology_manager.go:215] "Topology Admit Handler" podUID="372382d8-d23f-4e6d-89ae-8f2c9c46b6dc" podNamespace="kube-system" podName="storage-provisioner"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.263400 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c71e052-b3c2-4875-8aeb-7d724ee26e06-config-volume\") pod \"coredns-5dd5756b68-2fdsv\" (UID: \"1c71e052-b3c2-4875-8aeb-7d724ee26e06\") " pod="kube-system/coredns-5dd5756b68-2fdsv"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.263464 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-474bl\" (UniqueName: \"kubernetes.io/projected/1c71e052-b3c2-4875-8aeb-7d724ee26e06-kube-api-access-474bl\") pod \"coredns-5dd5756b68-2fdsv\" (UID: \"1c71e052-b3c2-4875-8aeb-7d724ee26e06\") " pod="kube-system/coredns-5dd5756b68-2fdsv"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.263575 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/372382d8-d23f-4e6d-89ae-8f2c9c46b6dc-tmp\") pod \"storage-provisioner\" (UID: \"372382d8-d23f-4e6d-89ae-8f2c9c46b6dc\") " pod="kube-system/storage-provisioner"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.263625 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cbg7\" (UniqueName: \"kubernetes.io/projected/372382d8-d23f-4e6d-89ae-8f2c9c46b6dc-kube-api-access-2cbg7\") pod \"storage-provisioner\" (UID: \"372382d8-d23f-4e6d-89ae-8f2c9c46b6dc\") " pod="kube-system/storage-provisioner"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.963727 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=14.963673229 podCreationTimestamp="2025-11-23 08:43:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:43:57.963551537 +0000 UTC m=+28.199973987" watchObservedRunningTime="2025-11-23 08:43:57.963673229 +0000 UTC m=+28.200095677"
Nov 23 08:43:57 old-k8s-version-204346 kubelet[1529]: I1123 08:43:57.974383 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5dd5756b68-2fdsv" podStartSLOduration=15.974330092 podCreationTimestamp="2025-11-23 08:43:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-23 08:43:57.974110377 +0000 UTC m=+28.210532825" watchObservedRunningTime="2025-11-23 08:43:57.974330092 +0000 UTC m=+28.210752539"
Nov 23 08:43:59 old-k8s-version-204346 kubelet[1529]: I1123 08:43:59.862724 1529 topology_manager.go:215] "Topology Admit Handler" podUID="85a1fcd5-ee10-4749-9dec-40efed82eb3e" podNamespace="default" podName="busybox"
Nov 23 08:43:59 old-k8s-version-204346 kubelet[1529]: I1123 08:43:59.981400 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdg6d\" (UniqueName: \"kubernetes.io/projected/85a1fcd5-ee10-4749-9dec-40efed82eb3e-kube-api-access-tdg6d\") pod \"busybox\" (UID: \"85a1fcd5-ee10-4749-9dec-40efed82eb3e\") " pod="default/busybox"
==> storage-provisioner [089b66b211cc086767c9fdf40aba06bcf7b4484c0976381a4bdf51afe2621f61] <==
I1123 08:43:57.613751 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1123 08:43:57.624633 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1123 08:43:57.624700 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1123 08:43:57.633950 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1123 08:43:57.634082 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"a0771e73-2533-4e9a-bd83-ee78487b1f50", APIVersion:"v1", ResourceVersion:"432", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' old-k8s-version-204346_bff6cf86-fcf0-4fe3-b85e-b85b2509b23f became leader
I1123 08:43:57.634291 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_old-k8s-version-204346_bff6cf86-fcf0-4fe3-b85e-b85b2509b23f!
I1123 08:43:57.734684 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_old-k8s-version-204346_bff6cf86-fcf0-4fe3-b85e-b85b2509b23f!
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-204346 -n old-k8s-version-204346
helpers_test.go:269: (dbg) Run: kubectl --context old-k8s-version-204346 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestStartStop/group/old-k8s-version/serial/DeployApp FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestStartStop/group/old-k8s-version/serial/DeployApp (14.15s)