=== RUN TestStartStop/group/default-k8s-diff-port/serial/Pause
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 pause -p default-k8s-diff-port-032958 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Done: out/minikube-linux-amd64 pause -p default-k8s-diff-port-032958 --alsologtostderr -v=1: (1.809165999s)
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-032958 -n default-k8s-diff-port-032958
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-032958 -n default-k8s-diff-port-032958: exit status 2 (15.850844873s)
-- stdout --
Stopped
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: post-pause apiserver status = "Stopped"; want = "Paused"
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-032958 -n default-k8s-diff-port-032958
E1220 02:14:11.117451 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1220 02:14:11.122831 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1220 02:14:11.133306 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1220 02:14:11.153784 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1220 02:14:11.194035 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1220 02:14:11.274437 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1220 02:14:11.434883 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1220 02:14:11.755911 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1220 02:14:12.397056 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1220 02:14:13.677687 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1220 02:14:16.238180 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-032958 -n default-k8s-diff-port-032958: exit status 2 (15.842115077s)
-- stdout --
Stopped
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 unpause -p default-k8s-diff-port-032958 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-032958 -n default-k8s-diff-port-032958
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-032958 -n default-k8s-diff-port-032958
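The pause check above failed because, after `minikube pause`, `status --format={{.APIServer}}` reported "Stopped" instead of "Paused" (exit status 2). A minimal Go sketch of that kind of post-pause verification, illustrative only and not the actual start_stop_delete_test.go code; the binary path, flags, and profile name are copied from the commands above, and apiServerStatus is a hypothetical helper:

// Sketch: run the status command the test uses and compare to "Paused".
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func apiServerStatus(profile string) (string, error) {
	// `minikube status` exits non-zero when a component is not Running/Paused,
	// so the output is inspected even on error (the "may be ok" case above).
	out, err := exec.Command("out/minikube-linux-amd64", "status",
		"--format={{.APIServer}}", "-p", profile, "-n", profile).CombinedOutput()
	return strings.TrimSpace(string(out)), err
}

func main() {
	got, err := apiServerStatus("default-k8s-diff-port-032958")
	if err != nil {
		fmt.Printf("status error: %v (may be ok)\n", err)
	}
	if got != "Paused" {
		fmt.Printf("post-pause apiserver status = %q; want = %q\n", got, "Paused")
	}
}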
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:224: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: network settings <======
helpers_test.go:231: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-032958 -n default-k8s-diff-port-032958
helpers_test.go:253: <<< TestStartStop/group/default-k8s-diff-port/serial/Pause FAILED: start of post-mortem logs <<<
helpers_test.go:254: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: minikube logs <======
helpers_test.go:256: (dbg) Run: out/minikube-linux-amd64 -p default-k8s-diff-port-032958 logs -n 25
E1220 02:14:21.358406 13018 cert_rotation.go:172] "Loading client cert failed" err="open /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/old-k8s-version-146675/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:256: (dbg) Done: out/minikube-linux-amd64 -p default-k8s-diff-port-032958 logs -n 25: (3.152680402s)
helpers_test.go:261: TestStartStop/group/default-k8s-diff-port/serial/Pause logs:
-- stdout --
==> Audit <==
┌─────────┬──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬──────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼──────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p kindnet-503505 sudo systemctl cat kubelet --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo journalctl -xeu kubelet --all --full --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /etc/kubernetes/kubelet.conf │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /var/lib/kubelet/config.yaml │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl status docker --all --full --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl cat docker --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /etc/docker/daemon.json │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo docker system info │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl status cri-docker --all --full --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl cat cri-docker --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /usr/lib/systemd/system/cri-docker.service │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cri-dockerd --version │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl status containerd --all --full --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl cat containerd --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /lib/systemd/system/containerd.service │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /etc/containerd/config.toml │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo containerd config dump │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl status crio --all --full --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ │
│ ssh │ -p kindnet-503505 sudo systemctl cat crio --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo crio config │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ delete │ -p kindnet-503505 │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ start │ -p false-503505 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=kvm2 --container-runtime=docker │ false-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ │
│ unpause │ -p default-k8s-diff-port-032958 --alsologtostderr -v=1 │ default-k8s-diff-port-032958 │ minitest │ v1.37.0 │ 20 Dec 25 02:14 UTC │ 20 Dec 25 02:14 UTC │
└─────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴──────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/12/20 02:13:53
Running on machine: minitest-vm-9d09530a
Binary: Built with gc go1.25.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1220 02:13:53.658426 38979 out.go:360] Setting OutFile to fd 1 ...
I1220 02:13:53.658597 38979 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1220 02:13:53.658612 38979 out.go:374] Setting ErrFile to fd 2...
I1220 02:13:53.658620 38979 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1220 02:13:53.658880 38979 root.go:338] Updating PATH: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/bin
I1220 02:13:53.659482 38979 out.go:368] Setting JSON to false
I1220 02:13:53.660578 38979 start.go:133] hostinfo: {"hostname":"minitest-vm-9d09530a.c.k8s-infra-e2e-boskos-103.internal","uptime":3547,"bootTime":1766193287,"procs":209,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"24.04","kernelVersion":"6.14.0-1021-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"324b1d65-3a78-4886-9ab4-95ed3c96a31c"}
I1220 02:13:53.660687 38979 start.go:143] virtualization: kvm guest
I1220 02:13:53.662866 38979 out.go:179] * [false-503505] minikube v1.37.0 on Ubuntu 24.04 (kvm/amd64)
I1220 02:13:53.664260 38979 notify.go:221] Checking for updates...
I1220 02:13:53.664290 38979 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1220 02:13:53.665824 38979 out.go:179] - KUBECONFIG=/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/kubeconfig
I1220 02:13:53.667283 38979 out.go:179] - MINIKUBE_HOME=/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube
I1220 02:13:53.668904 38979 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1220 02:13:53.670341 38979 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1220 02:13:53.672156 38979 config.go:182] Loaded profile config "calico-503505": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
I1220 02:13:53.672297 38979 config.go:182] Loaded profile config "custom-flannel-503505": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
I1220 02:13:53.672434 38979 config.go:182] Loaded profile config "default-k8s-diff-port-032958": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
I1220 02:13:53.672545 38979 config.go:182] Loaded profile config "guest-073858": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v0.0.0
I1220 02:13:53.672679 38979 driver.go:422] Setting default libvirt URI to qemu:///system
I1220 02:13:53.714352 38979 out.go:179] * Using the kvm2 driver based on user configuration
I1220 02:13:53.715582 38979 start.go:309] selected driver: kvm2
I1220 02:13:53.715609 38979 start.go:928] validating driver "kvm2" against <nil>
I1220 02:13:53.715626 38979 start.go:939] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1220 02:13:53.716847 38979 start_flags.go:329] no existing cluster config was found, will generate one from the flags
I1220 02:13:53.717254 38979 start_flags.go:995] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1220 02:13:53.717297 38979 cni.go:84] Creating CNI manager for "false"
I1220 02:13:53.717349 38979 start.go:353] cluster config:
{Name:false-503505 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765966054-22186@sha256:1c173489767e6632c410d2554f1a2272f032a423dd528157e201daadfe3c43f0 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.3 ClusterName:false-503505 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:false} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:15m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1220 02:13:53.717508 38979 iso.go:125] acquiring lock: {Name:mk8cff2fd2ec419d0f1f974993910ae0235f0b9c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1220 02:13:53.719137 38979 out.go:179] * Starting "false-503505" primary control-plane node in "false-503505" cluster
I1220 02:13:53.720475 38979 preload.go:188] Checking if preload exists for k8s version v1.34.3 and runtime docker
I1220 02:13:53.720519 38979 preload.go:203] Found local preload: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.3-docker-overlay2-amd64.tar.lz4
I1220 02:13:53.720529 38979 cache.go:65] Caching tarball of preloaded images
I1220 02:13:53.720653 38979 preload.go:251] Found /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.3-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1220 02:13:53.720670 38979 cache.go:68] Finished verifying existence of preloaded tar for v1.34.3 on docker
I1220 02:13:53.720801 38979 profile.go:143] Saving config to /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/false-503505/config.json ...
I1220 02:13:53.720830 38979 lock.go:35] WriteFile acquiring /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/false-503505/config.json: {Name:mkc8b6869a0bb6c3a942663395236fb8c2775a51 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1220 02:13:53.721027 38979 start.go:360] acquireMachinesLock for false-503505: {Name:mkeb3229b5d18611c16c8e938b31492b9b6546b6 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1220 02:13:53.721080 38979 start.go:364] duration metric: took 32.113µs to acquireMachinesLock for "false-503505"
I1220 02:13:53.721108 38979 start.go:93] Provisioning new machine with config: &{Name:false-503505 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22186/minikube-v1.37.0-1765965980-22186-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765966054-22186@sha256:1c173489767e6632c410d2554f1a2272f032a423dd528157e201daadfe3c43f0 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.3 ClusterName:false-503505 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:false} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:15m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.3 ContainerRuntime:docker ControlPlane:true Worker:true}
I1220 02:13:53.721191 38979 start.go:125] createHost starting for "" (driver="kvm2")
I1220 02:13:53.104657 37878 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1220 02:13:53.203649 37878 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1220 02:13:53.414002 37878 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1220 02:13:53.414235 37878 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [custom-flannel-503505 localhost] and IPs [192.168.72.110 127.0.0.1 ::1]
I1220 02:13:53.718885 37878 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1220 02:13:53.719606 37878 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [custom-flannel-503505 localhost] and IPs [192.168.72.110 127.0.0.1 ::1]
I1220 02:13:54.333369 37878 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1220 02:13:54.424119 37878 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1220 02:13:54.440070 37878 kubeadm.go:319] [certs] Generating "sa" key and public key
I1220 02:13:54.440221 37878 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1220 02:13:54.643883 37878 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1220 02:13:54.882013 37878 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1220 02:13:54.904688 37878 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1220 02:13:55.025586 37878 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1220 02:13:55.145485 37878 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1220 02:13:55.145626 37878 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1220 02:13:55.148326 37878 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
W1220 02:13:54.723698 37762 node_ready.go:57] node "calico-503505" has "Ready":"False" status (will retry)
W1220 02:13:57.088471 37762 node_ready.go:57] node "calico-503505" has "Ready":"False" status (will retry)
I1220 02:13:55.150289 37878 out.go:252] - Booting up control plane ...
I1220 02:13:55.150458 37878 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1220 02:13:55.151333 37878 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1220 02:13:55.152227 37878 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1220 02:13:55.175699 37878 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1220 02:13:55.175981 37878 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1220 02:13:55.186275 37878 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1220 02:13:55.186852 37878 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1220 02:13:55.186945 37878 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1220 02:13:55.443272 37878 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1220 02:13:55.443453 37878 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1220 02:13:57.443421 37878 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 2.001962214s
I1220 02:13:57.453249 37878 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1220 02:13:57.453392 37878 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.72.110:8443/livez
I1220 02:13:57.453521 37878 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1220 02:13:57.453636 37878 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1220 02:13:53.723129 38979 out.go:252] * Creating kvm2 VM (CPUs=2, Memory=3072MB, Disk=20000MB) ...
I1220 02:13:53.723383 38979 start.go:159] libmachine.API.Create for "false-503505" (driver="kvm2")
I1220 02:13:53.723423 38979 client.go:173] LocalClient.Create starting
I1220 02:13:53.723510 38979 main.go:144] libmachine: Reading certificate data from /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca.pem
I1220 02:13:53.723557 38979 main.go:144] libmachine: Decoding PEM data...
I1220 02:13:53.723581 38979 main.go:144] libmachine: Parsing certificate...
I1220 02:13:53.723676 38979 main.go:144] libmachine: Reading certificate data from /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/cert.pem
I1220 02:13:53.723706 38979 main.go:144] libmachine: Decoding PEM data...
I1220 02:13:53.723725 38979 main.go:144] libmachine: Parsing certificate...
I1220 02:13:53.724182 38979 main.go:144] libmachine: creating domain...
I1220 02:13:53.724217 38979 main.go:144] libmachine: creating network...
I1220 02:13:53.725920 38979 main.go:144] libmachine: found existing default network
I1220 02:13:53.726255 38979 main.go:144] libmachine: <network connections='4'>
<name>default</name>
<uuid>650ca552-1913-49ac-a1fd-736d0c584a06</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr0' stp='on' delay='0'/>
<mac address='52:54:00:de:58:ff'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.122.2' end='192.168.122.254'/>
</dhcp>
</ip>
</network>
I1220 02:13:53.727630 38979 network.go:211] skipping subnet 192.168.39.0/24 that is taken: &{IP:192.168.39.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.39.0/24 Gateway:192.168.39.1 ClientMin:192.168.39.2 ClientMax:192.168.39.254 Broadcast:192.168.39.255 IsPrivate:true Interface:{IfaceName:virbr1 IfaceIPv4:192.168.39.1 IfaceMTU:1500 IfaceMAC:52:54:00:e8:02:c4} reservation:<nil>}
I1220 02:13:53.728421 38979 network.go:211] skipping subnet 192.168.50.0/24 that is taken: &{IP:192.168.50.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.50.0/24 Gateway:192.168.50.1 ClientMin:192.168.50.2 ClientMax:192.168.50.254 Broadcast:192.168.50.255 IsPrivate:true Interface:{IfaceName:virbr2 IfaceIPv4:192.168.50.1 IfaceMTU:1500 IfaceMAC:52:54:00:8b:7d:ff} reservation:<nil>}
I1220 02:13:53.729869 38979 network.go:206] using free private subnet 192.168.61.0/24: &{IP:192.168.61.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.61.0/24 Gateway:192.168.61.1 ClientMin:192.168.61.2 ClientMax:192.168.61.254 Broadcast:192.168.61.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001aac760}
I1220 02:13:53.729965 38979 main.go:144] libmachine: defining private network:
<network>
<name>mk-false-503505</name>
<dns enable='no'/>
<ip address='192.168.61.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.61.2' end='192.168.61.253'/>
</dhcp>
</ip>
</network>
I1220 02:13:53.736168 38979 main.go:144] libmachine: creating private network mk-false-503505 192.168.61.0/24...
I1220 02:13:53.810612 38979 main.go:144] libmachine: private network mk-false-503505 192.168.61.0/24 created
I1220 02:13:53.810976 38979 main.go:144] libmachine: <network>
<name>mk-false-503505</name>
<uuid>145d091e-eda6-4cfe-8946-ea394cfc6f9d</uuid>
<bridge name='virbr3' stp='on' delay='0'/>
<mac address='52:54:00:b5:b9:98'/>
<dns enable='no'/>
<ip address='192.168.61.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.61.2' end='192.168.61.253'/>
</dhcp>
</ip>
</network>
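The network.go lines above show libmachine skipping 192.168.39.0/24 and 192.168.50.0/24 (already claimed by other profiles) and settling on the free 192.168.61.0/24 for mk-false-503505. A minimal sketch of a first-free-subnet scan in that spirit, illustrative only; pickFreeSubnet is a hypothetical helper, not minikube's actual network.go logic:

package main

import "fmt"

// pickFreeSubnet returns the first candidate /24 that is not already in use.
func pickFreeSubnet(candidates []string, taken map[string]bool) (string, bool) {
	for _, cidr := range candidates {
		if !taken[cidr] {
			return cidr, true
		}
	}
	return "", false
}

func main() {
	// Subnets reported as taken in the log above (virbr1, virbr2).
	taken := map[string]bool{
		"192.168.39.0/24": true,
		"192.168.50.0/24": true,
	}
	candidates := []string{"192.168.39.0/24", "192.168.50.0/24", "192.168.61.0/24"}
	if cidr, ok := pickFreeSubnet(candidates, taken); ok {
		fmt.Println("using free private subnet", cidr) // -> 192.168.61.0/24
	}
}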
I1220 02:13:53.811017 38979 main.go:144] libmachine: setting up store path in /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505 ...
I1220 02:13:53.811066 38979 main.go:144] libmachine: building disk image from file:///home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/iso/amd64/minikube-v1.37.0-1765965980-22186-amd64.iso
I1220 02:13:53.811082 38979 common.go:152] Making disk image using store path: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube
I1220 02:13:53.811185 38979 main.go:144] libmachine: Downloading /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/boot2docker.iso from file:///home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/iso/amd64/minikube-v1.37.0-1765965980-22186-amd64.iso...
I1220 02:13:54.101881 38979 common.go:159] Creating ssh key: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/id_rsa...
I1220 02:13:54.171818 38979 common.go:165] Creating raw disk image: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/false-503505.rawdisk...
I1220 02:13:54.171860 38979 main.go:144] libmachine: Writing magic tar header
I1220 02:13:54.171878 38979 main.go:144] libmachine: Writing SSH key tar header
I1220 02:13:54.171952 38979 common.go:179] Fixing permissions on /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505 ...
I1220 02:13:54.172017 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505
I1220 02:13:54.172042 38979 main.go:144] libmachine: setting executable bit set on /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505 (perms=drwx------)
I1220 02:13:54.172055 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines
I1220 02:13:54.172068 38979 main.go:144] libmachine: setting executable bit set on /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines (perms=drwxr-xr-x)
I1220 02:13:54.172080 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube
I1220 02:13:54.172089 38979 main.go:144] libmachine: setting executable bit set on /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube (perms=drwxr-xr-x)
I1220 02:13:54.172097 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160
I1220 02:13:54.172106 38979 main.go:144] libmachine: setting executable bit set on /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160 (perms=drwxrwxr-x)
I1220 02:13:54.172116 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest/minikube-integration
I1220 02:13:54.172127 38979 main.go:144] libmachine: setting executable bit set on /home/minitest/minikube-integration (perms=drwxrwxr-x)
I1220 02:13:54.172134 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest
I1220 02:13:54.172143 38979 main.go:144] libmachine: setting executable bit set on /home/minitest (perms=drwxr-x--x)
I1220 02:13:54.172153 38979 main.go:144] libmachine: checking permissions on dir: /home
I1220 02:13:54.172162 38979 main.go:144] libmachine: skipping /home - not owner
I1220 02:13:54.172166 38979 main.go:144] libmachine: defining domain...
I1220 02:13:54.173523 38979 main.go:144] libmachine: defining domain using XML:
<domain type='kvm'>
<name>false-503505</name>
<memory unit='MiB'>3072</memory>
<vcpu>2</vcpu>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough'>
</cpu>
<os>
<type>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<devices>
<disk type='file' device='cdrom'>
<source file='/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' cache='default' io='threads' />
<source file='/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/false-503505.rawdisk'/>
<target dev='hda' bus='virtio'/>
</disk>
<interface type='network'>
<source network='mk-false-503505'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<source network='default'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
</rng>
</devices>
</domain>
I1220 02:13:54.178932 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:b7:52:73 in network default
I1220 02:13:54.179675 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:54.179696 38979 main.go:144] libmachine: starting domain...
I1220 02:13:54.179701 38979 main.go:144] libmachine: ensuring networks are active...
I1220 02:13:54.180774 38979 main.go:144] libmachine: Ensuring network default is active
I1220 02:13:54.181409 38979 main.go:144] libmachine: Ensuring network mk-false-503505 is active
I1220 02:13:54.182238 38979 main.go:144] libmachine: getting domain XML...
I1220 02:13:54.183538 38979 main.go:144] libmachine: starting domain XML:
<domain type='kvm'>
<name>false-503505</name>
<uuid>624dd300-6a99-4c02-9eff-8eb33e6519e9</uuid>
<memory unit='KiB'>3145728</memory>
<currentMemory unit='KiB'>3145728</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-noble'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='2'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' io='threads'/>
<source file='/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/false-503505.rawdisk'/>
<target dev='hda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</controller>
<interface type='network'>
<mac address='52:54:00:4e:1e:41'/>
<source network='mk-false-503505'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:b7:52:73'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<audio id='1' type='none'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</rng>
</devices>
</domain>
I1220 02:13:55.365223 38979 main.go:144] libmachine: waiting for domain to start...
I1220 02:13:55.367393 38979 main.go:144] libmachine: domain is now running
I1220 02:13:55.367419 38979 main.go:144] libmachine: waiting for IP...
I1220 02:13:55.368500 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:55.369502 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:55.369522 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:55.369923 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:55.369977 38979 retry.go:31] will retry after 247.996373ms: waiting for domain to come up
I1220 02:13:55.619698 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:55.620501 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:55.620524 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:55.620981 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:55.621018 38979 retry.go:31] will retry after 253.163992ms: waiting for domain to come up
I1220 02:13:55.875623 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:55.876522 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:55.876543 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:55.876997 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:55.877034 38979 retry.go:31] will retry after 322.078046ms: waiting for domain to come up
I1220 02:13:56.200749 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:56.201573 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:56.201590 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:56.201993 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:56.202032 38979 retry.go:31] will retry after 398.279098ms: waiting for domain to come up
I1220 02:13:56.601723 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:56.602519 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:56.602554 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:56.603065 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:56.603103 38979 retry.go:31] will retry after 668.508453ms: waiting for domain to come up
I1220 02:13:57.272883 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:57.273735 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:57.273763 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:57.274179 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:57.274223 38979 retry.go:31] will retry after 936.48012ms: waiting for domain to come up
I1220 02:13:58.212951 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:58.213934 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:58.213955 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:58.214490 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:58.214540 38979 retry.go:31] will retry after 1.101549544s: waiting for domain to come up
W1220 02:13:59.093909 37762 node_ready.go:57] node "calico-503505" has "Ready":"False" status (will retry)
I1220 02:14:00.089963 37762 node_ready.go:49] node "calico-503505" is "Ready"
I1220 02:14:00.090003 37762 node_ready.go:38] duration metric: took 9.504754397s for node "calico-503505" to be "Ready" ...
I1220 02:14:00.090027 37762 api_server.go:52] waiting for apiserver process to appear ...
I1220 02:14:00.090096 37762 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1220 02:14:00.121908 37762 api_server.go:72] duration metric: took 11.479258368s to wait for apiserver process to appear ...
I1220 02:14:00.121945 37762 api_server.go:88] waiting for apiserver healthz status ...
I1220 02:14:00.121968 37762 api_server.go:253] Checking apiserver healthz at https://192.168.39.226:8443/healthz ...
I1220 02:14:00.133024 37762 api_server.go:279] https://192.168.39.226:8443/healthz returned 200:
ok
I1220 02:14:00.134499 37762 api_server.go:141] control plane version: v1.34.3
I1220 02:14:00.134533 37762 api_server.go:131] duration metric: took 12.580039ms to wait for apiserver health ...
I1220 02:14:00.134544 37762 system_pods.go:43] waiting for kube-system pods to appear ...
I1220 02:14:00.143085 37762 system_pods.go:59] 9 kube-system pods found
I1220 02:14:00.143143 37762 system_pods.go:61] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:00.143160 37762 system_pods.go:61] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:00.143171 37762 system_pods.go:61] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:00.143177 37762 system_pods.go:61] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:00.143183 37762 system_pods.go:61] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:00.143188 37762 system_pods.go:61] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:00.143194 37762 system_pods.go:61] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:00.143219 37762 system_pods.go:61] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:00.143233 37762 system_pods.go:61] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:00.143243 37762 system_pods.go:74] duration metric: took 8.690731ms to wait for pod list to return data ...
I1220 02:14:00.143254 37762 default_sa.go:34] waiting for default service account to be created ...
I1220 02:14:00.147300 37762 default_sa.go:45] found service account: "default"
I1220 02:14:00.147335 37762 default_sa.go:55] duration metric: took 4.072144ms for default service account to be created ...
I1220 02:14:00.147349 37762 system_pods.go:116] waiting for k8s-apps to be running ...
I1220 02:14:00.153827 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:00.153869 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:00.153882 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:00.153892 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:00.153900 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:00.153907 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:00.153911 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:00.153917 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:00.153922 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:00.153930 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:00.153953 37762 retry.go:31] will retry after 191.011989ms: missing components: kube-dns
I1220 02:14:00.353588 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:00.353638 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:00.353652 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:00.353665 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:00.353673 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:00.353681 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:00.353688 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:00.353696 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:00.353702 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:00.353710 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:00.353731 37762 retry.go:31] will retry after 332.593015ms: missing components: kube-dns
I1220 02:14:00.697960 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:00.698016 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:00.698032 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:00.698045 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:00.698051 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:00.698057 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:00.698062 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:00.698068 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:00.698073 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:00.698080 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:00.698098 37762 retry.go:31] will retry after 441.450882ms: missing components: kube-dns
I1220 02:14:01.147620 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:01.147663 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:01.147675 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:01.147685 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:01.147690 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:01.147697 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:01.147702 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:01.147707 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:01.147711 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:01.147718 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:01.147737 37762 retry.go:31] will retry after 398.996064ms: missing components: kube-dns
I1220 02:14:01.555710 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:01.555752 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:01.555764 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:01.555774 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:01.555779 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:01.555786 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:01.555791 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:01.555797 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:01.555802 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:01.555813 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:01.555831 37762 retry.go:31] will retry after 742.519055ms: missing components: kube-dns
I1220 02:14:02.306002 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:02.306049 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:02.306068 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:02.306080 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:02.306088 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:02.306097 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:02.306102 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:02.306109 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:02.306114 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:02.306119 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:02.306141 37762 retry.go:31] will retry after 687.588334ms: missing components: kube-dns
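
The 37762 poller above keeps listing kube-system pods and sleeping a jittered delay until kube-dns reports Ready. Below is a minimal Go sketch of that poll-with-jittered-retry shape; the function name, starting delay, and growth factor are illustrative assumptions, not minikube's actual retry package.

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// pollUntil re-runs check until it succeeds or maxWait elapses, sleeping a
// jittered, growing delay between attempts, similar to the
// "retry.go:31] will retry after ..." lines in the log.
func pollUntil(check func() error, maxWait time.Duration) error {
	deadline := time.Now().Add(maxWait)
	base := 400 * time.Millisecond // assumed starting delay
	for time.Now().Before(deadline) {
		if err := check(); err == nil {
			return nil
		}
		wait := base + time.Duration(rand.Int63n(int64(base))) // add jitter
		fmt.Printf("will retry after %v\n", wait)
		time.Sleep(wait)
		base = base * 3 / 2 // assumed growth; the real backoff schedule differs
	}
	return errors.New("timed out: missing components: kube-dns")
}

func main() {
	_ = pollUntil(func() error { return errors.New("kube-dns not ready") }, 5*time.Second)
}
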
I1220 02:14:01.475480 37878 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 4.023883563s
I1220 02:13:59.318088 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:59.319169 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:59.319195 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:59.319707 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:59.319759 38979 retry.go:31] will retry after 1.133836082s: waiting for domain to come up
I1220 02:14:00.455752 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:00.457000 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:14:00.457032 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:14:00.457642 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:14:00.457696 38979 retry.go:31] will retry after 1.689205474s: waiting for domain to come up
I1220 02:14:02.149657 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:02.150579 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:14:02.150669 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:14:02.151167 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:14:02.151218 38979 retry.go:31] will retry after 1.402452731s: waiting for domain to come up
I1220 02:14:03.555309 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:03.556319 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:14:03.556389 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:14:03.556908 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:14:03.556948 38979 retry.go:31] will retry after 2.79303956s: waiting for domain to come up
I1220 02:14:03.304845 37878 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 5.854389668s
I1220 02:14:04.452000 37878 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 7.001670897s
I1220 02:14:04.482681 37878 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1220 02:14:04.509128 37878 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1220 02:14:04.533960 37878 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1220 02:14:04.534255 37878 kubeadm.go:319] [mark-control-plane] Marking the node custom-flannel-503505 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1220 02:14:04.549617 37878 kubeadm.go:319] [bootstrap-token] Using token: 5feew1.aaci0na7tzxpkq74
I1220 02:14:04.551043 37878 out.go:252] - Configuring RBAC rules ...
I1220 02:14:04.551218 37878 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1220 02:14:04.561847 37878 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1220 02:14:04.591000 37878 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1220 02:14:04.597908 37878 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1220 02:14:04.606680 37878 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1220 02:14:04.614933 37878 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1220 02:14:04.862442 37878 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1220 02:14:05.356740 37878 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1220 02:14:05.862025 37878 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1220 02:14:05.865061 37878 kubeadm.go:319]
I1220 02:14:05.865156 37878 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1220 02:14:05.865168 37878 kubeadm.go:319]
I1220 02:14:05.865282 37878 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1220 02:14:05.865294 37878 kubeadm.go:319]
I1220 02:14:05.865359 37878 kubeadm.go:319] mkdir -p $HOME/.kube
I1220 02:14:05.865464 37878 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1220 02:14:05.865569 37878 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1220 02:14:05.865593 37878 kubeadm.go:319]
I1220 02:14:05.865675 37878 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1220 02:14:05.865685 37878 kubeadm.go:319]
I1220 02:14:05.865781 37878 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1220 02:14:05.865795 37878 kubeadm.go:319]
I1220 02:14:05.865876 37878 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1220 02:14:05.865983 37878 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1220 02:14:05.866079 37878 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1220 02:14:05.866085 37878 kubeadm.go:319]
I1220 02:14:05.866221 37878 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1220 02:14:05.866332 37878 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1220 02:14:05.866337 37878 kubeadm.go:319]
I1220 02:14:05.866459 37878 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 5feew1.aaci0na7tzxpkq74 \
I1220 02:14:05.866573 37878 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:34b132c11c5a062e0480b441f2caac7fcba372b596da4b2c80fd8c00c74704a7 \
I1220 02:14:05.866595 37878 kubeadm.go:319] --control-plane
I1220 02:14:05.866599 37878 kubeadm.go:319]
I1220 02:14:05.866684 37878 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1220 02:14:05.866688 37878 kubeadm.go:319]
I1220 02:14:05.866779 37878 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 5feew1.aaci0na7tzxpkq74 \
I1220 02:14:05.866902 37878 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:34b132c11c5a062e0480b441f2caac7fcba372b596da4b2c80fd8c00c74704a7
I1220 02:14:05.869888 37878 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1220 02:14:05.869959 37878 cni.go:84] Creating CNI manager for "testdata/kube-flannel.yaml"
I1220 02:14:05.871868 37878 out.go:179] * Configuring testdata/kube-flannel.yaml (Container Networking Interface) ...
I1220 02:14:03.004292 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:03.004339 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:03.004352 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:03.004361 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:03.004367 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:03.004374 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:03.004379 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:03.004384 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:03.004389 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:03.004394 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:03.004412 37762 retry.go:31] will retry after 732.081748ms: missing components: kube-dns
I1220 02:14:03.744119 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:03.744161 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:03.744175 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:03.744185 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:03.744191 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:03.744214 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:03.744221 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:03.744227 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:03.744232 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:03.744241 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:03.744273 37762 retry.go:31] will retry after 1.276813322s: missing components: kube-dns
I1220 02:14:05.030079 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:05.030129 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:05.030146 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:05.030161 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:05.030168 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:05.030187 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:05.030194 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:05.030221 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:05.030229 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:05.030235 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:05.030257 37762 retry.go:31] will retry after 1.238453929s: missing components: kube-dns
I1220 02:14:06.275974 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:06.276021 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:06.276033 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:06.276049 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:06.276055 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:06.276061 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:06.276066 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:06.276077 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:06.276083 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:06.276087 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:06.276106 37762 retry.go:31] will retry after 1.908248969s: missing components: kube-dns
I1220 02:14:05.873406 37878 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.3/kubectl ...
I1220 02:14:05.873469 37878 ssh_runner.go:195] Run: stat -c "%s %y" /var/tmp/minikube/cni.yaml
I1220 02:14:05.881393 37878 ssh_runner.go:352] existence check for /var/tmp/minikube/cni.yaml: stat -c "%s %y" /var/tmp/minikube/cni.yaml: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/tmp/minikube/cni.yaml': No such file or directory
I1220 02:14:05.881431 37878 ssh_runner.go:362] scp testdata/kube-flannel.yaml --> /var/tmp/minikube/cni.yaml (4578 bytes)
I1220 02:14:05.936780 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
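
The three steps above (stat the target, scp the manifest when missing, then kubectl apply with the guest kubeconfig) are the whole CNI install. A rough Go sketch of the final apply step follows, run locally via os/exec rather than over SSH; the paths mirror the log and the helper name is made up for illustration.

package main

import (
	"fmt"
	"os/exec"
)

// applyCNI applies a CNI manifest with an explicit kubeconfig, mirroring the
// kubectl invocation in the log; in the real flow this runs inside the guest.
func applyCNI(kubectl, kubeconfig, manifest string) error {
	cmd := exec.Command("sudo", kubectl, "apply", "--kubeconfig="+kubeconfig, "-f", manifest)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("kubectl apply failed: %v\n%s", err, out)
	}
	return nil
}

func main() {
	_ = applyCNI("/var/lib/minikube/binaries/v1.34.3/kubectl",
		"/var/lib/minikube/kubeconfig", "/var/tmp/minikube/cni.yaml")
}
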
I1220 02:14:06.396862 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:06.396880 37878 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1220 02:14:06.396862 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes custom-flannel-503505 minikube.k8s.io/updated_at=2025_12_20T02_14_06_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=7cd9f41b7421760cf1f1eaa8725bdb975037b06d minikube.k8s.io/name=custom-flannel-503505 minikube.k8s.io/primary=true
I1220 02:14:06.630781 37878 ops.go:34] apiserver oom_adj: -16
I1220 02:14:06.630941 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:07.131072 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:07.631526 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:06.351650 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:06.352735 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:14:06.352774 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:14:06.353319 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:14:06.353358 38979 retry.go:31] will retry after 3.225841356s: waiting for domain to come up
I1220 02:14:08.131099 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:08.631429 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:09.131400 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:09.631470 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:10.131821 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:10.631264 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:10.765536 37878 kubeadm.go:1114] duration metric: took 4.368721457s to wait for elevateKubeSystemPrivileges
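
The repeated "kubectl get sa default" runs above are only waiting for the token controller to create the default ServiceAccount before the RBAC binding can be relied on. A small client-go sketch of the same wait; the kubeconfig path, timeout handling, and function name are assumptions for illustration, not minikube's code.

package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForDefaultSA polls until the "default" ServiceAccount exists in the
// default namespace, which is what the repeated kubectl calls check for.
func waitForDefaultSA(kubeconfig string, timeout time.Duration) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if _, err := cs.CoreV1().ServiceAccounts("default").Get(context.TODO(), "default", metav1.GetOptions{}); err == nil {
			return nil // token controller has created the default ServiceAccount
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("default ServiceAccount not ready after %v", timeout)
}
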
I1220 02:14:10.765599 37878 kubeadm.go:403] duration metric: took 18.502801612s to StartCluster
I1220 02:14:10.765625 37878 settings.go:142] acquiring lock: {Name:mk57472848b32b0320e862b3ad8a64076ed3d76e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1220 02:14:10.765731 37878 settings.go:150] Updating kubeconfig: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/kubeconfig
I1220 02:14:10.767410 37878 lock.go:35] WriteFile acquiring /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/kubeconfig: {Name:mk7e6532318eb55e3c1811a528040bd41c46d8c7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1220 02:14:10.767716 37878 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1220 02:14:10.767786 37878 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1220 02:14:10.767867 37878 addons.go:70] Setting storage-provisioner=true in profile "custom-flannel-503505"
I1220 02:14:10.767885 37878 addons.go:239] Setting addon storage-provisioner=true in "custom-flannel-503505"
I1220 02:14:10.767747 37878 start.go:236] Will wait 15m0s for node &{Name: IP:192.168.72.110 Port:8443 KubernetesVersion:v1.34.3 ContainerRuntime:docker ControlPlane:true Worker:true}
I1220 02:14:10.767912 37878 host.go:66] Checking if "custom-flannel-503505" exists ...
I1220 02:14:10.767936 37878 config.go:182] Loaded profile config "custom-flannel-503505": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
I1220 02:14:10.767992 37878 addons.go:70] Setting default-storageclass=true in profile "custom-flannel-503505"
I1220 02:14:10.768006 37878 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "custom-flannel-503505"
I1220 02:14:10.769347 37878 out.go:179] * Verifying Kubernetes components...
I1220 02:14:10.770891 37878 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:10.772643 37878 addons.go:239] Setting addon default-storageclass=true in "custom-flannel-503505"
I1220 02:14:10.772686 37878 host.go:66] Checking if "custom-flannel-503505" exists ...
I1220 02:14:10.772827 37878 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1220 02:14:10.774271 37878 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1220 02:14:10.774291 37878 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1220 02:14:10.775118 37878 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1220 02:14:10.775173 37878 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1220 02:14:10.778715 37878 main.go:144] libmachine: domain custom-flannel-503505 has defined MAC address 52:54:00:31:8f:50 in network mk-custom-flannel-503505
I1220 02:14:10.779148 37878 main.go:144] libmachine: domain custom-flannel-503505 has defined MAC address 52:54:00:31:8f:50 in network mk-custom-flannel-503505
I1220 02:14:10.779240 37878 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:31:8f:50", ip: ""} in network mk-custom-flannel-503505: {Iface:virbr4 ExpiryTime:2025-12-20 03:13:37 +0000 UTC Type:0 Mac:52:54:00:31:8f:50 Iaid: IPaddr:192.168.72.110 Prefix:24 Hostname:custom-flannel-503505 Clientid:01:52:54:00:31:8f:50}
I1220 02:14:10.779272 37878 main.go:144] libmachine: domain custom-flannel-503505 has defined IP address 192.168.72.110 and MAC address 52:54:00:31:8f:50 in network mk-custom-flannel-503505
I1220 02:14:10.779776 37878 sshutil.go:53] new ssh client: &{IP:192.168.72.110 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/custom-flannel-503505/id_rsa Username:docker}
I1220 02:14:10.780325 37878 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:31:8f:50", ip: ""} in network mk-custom-flannel-503505: {Iface:virbr4 ExpiryTime:2025-12-20 03:13:37 +0000 UTC Type:0 Mac:52:54:00:31:8f:50 Iaid: IPaddr:192.168.72.110 Prefix:24 Hostname:custom-flannel-503505 Clientid:01:52:54:00:31:8f:50}
I1220 02:14:10.780367 37878 main.go:144] libmachine: domain custom-flannel-503505 has defined IP address 192.168.72.110 and MAC address 52:54:00:31:8f:50 in network mk-custom-flannel-503505
I1220 02:14:10.780605 37878 sshutil.go:53] new ssh client: &{IP:192.168.72.110 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/custom-flannel-503505/id_rsa Username:docker}
I1220 02:14:11.077940 37878 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.72.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1220 02:14:11.193874 37878 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1220 02:14:11.505786 37878 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1220 02:14:11.514993 37878 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1220 02:14:11.665088 37878 start.go:977] {"host.minikube.internal": 192.168.72.1} host record injected into CoreDNS's ConfigMap
I1220 02:14:11.666520 37878 node_ready.go:35] waiting up to 15m0s for node "custom-flannel-503505" to be "Ready" ...
I1220 02:14:12.188508 37878 kapi.go:214] "coredns" deployment in "kube-system" namespace and "custom-flannel-503505" context rescaled to 1 replicas
I1220 02:14:12.198043 37878 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1220 02:14:08.191550 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:08.191589 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:08.191605 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:08.191621 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:08.191627 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:08.191633 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:08.191639 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:08.191645 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:08.191652 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:08.191661 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:08.191680 37762 retry.go:31] will retry after 2.235844761s: missing components: kube-dns
I1220 02:14:10.441962 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:10.442003 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:10.442017 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:10.442028 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:10.442035 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:10.442041 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:10.442048 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:10.442053 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:10.442059 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:10.442063 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:10.442080 37762 retry.go:31] will retry after 3.072193082s: missing components: kube-dns
I1220 02:14:12.199503 37878 addons.go:530] duration metric: took 1.431726471s for enable addons: enabled=[storage-provisioner default-storageclass]
I1220 02:14:09.580950 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:09.581833 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:14:09.581857 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:14:09.582327 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:14:09.582367 38979 retry.go:31] will retry after 3.32332613s: waiting for domain to come up
I1220 02:14:12.910036 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:12.911080 38979 main.go:144] libmachine: domain false-503505 has current primary IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:12.911099 38979 main.go:144] libmachine: found domain IP: 192.168.61.177
I1220 02:14:12.911107 38979 main.go:144] libmachine: reserving static IP address...
I1220 02:14:12.911656 38979 main.go:144] libmachine: unable to find host DHCP lease matching {name: "false-503505", mac: "52:54:00:4e:1e:41", ip: "192.168.61.177"} in network mk-false-503505
I1220 02:14:13.162890 38979 main.go:144] libmachine: reserved static IP address 192.168.61.177 for domain false-503505
I1220 02:14:13.162914 38979 main.go:144] libmachine: waiting for SSH...
I1220 02:14:13.162921 38979 main.go:144] libmachine: Getting to WaitForSSH function...
I1220 02:14:13.166240 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.166798 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:minikube Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.166839 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.167111 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:13.167442 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:13.167462 38979 main.go:144] libmachine: About to run SSH command:
exit 0
I1220 02:14:13.287553 38979 main.go:144] libmachine: SSH cmd err, output: <nil>:
I1220 02:14:13.288033 38979 main.go:144] libmachine: domain creation complete
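
The "waiting for SSH" / "About to run SSH command: exit 0" exchange above is essentially a reachability probe against the guest's port 22. A minimal Go sketch of that wait, with a plain TCP dial standing in for the real SSH session; the address and timeout are illustrative values, not libmachine's implementation.

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForSSH treats the guest as reachable once TCP port 22 accepts a
// connection; the real flow then runs a trivial "exit 0" over SSH.
func waitForSSH(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err == nil {
			conn.Close()
			return nil // sshd is accepting connections
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("ssh on %s not reachable after %v", addr, timeout)
}

func main() {
	if err := waitForSSH("192.168.61.177:22", 2*time.Minute); err != nil {
		fmt.Println(err)
	}
}
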
I1220 02:14:13.289768 38979 machine.go:94] provisionDockerMachine start ...
I1220 02:14:13.292967 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.293534 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.293566 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.293831 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:13.294091 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:13.294106 38979 main.go:144] libmachine: About to run SSH command:
hostname
I1220 02:14:13.408900 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: minikube
I1220 02:14:13.408931 38979 buildroot.go:166] provisioning hostname "false-503505"
I1220 02:14:13.412183 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.412723 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.412747 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.412990 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:13.413194 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:13.413235 38979 main.go:144] libmachine: About to run SSH command:
sudo hostname false-503505 && echo "false-503505" | sudo tee /etc/hostname
I1220 02:14:13.545519 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: false-503505
I1220 02:14:13.548500 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.548973 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.549006 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.549225 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:13.549497 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:13.549521 38979 main.go:144] libmachine: About to run SSH command:
if ! grep -xq '.*\sfalse-503505' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 false-503505/g' /etc/hosts;
else
echo '127.0.1.1 false-503505' | sudo tee -a /etc/hosts;
fi
fi
I1220 02:14:13.522551 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:13.522594 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:13.522608 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:13.522618 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:13.522624 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:13.522630 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:13.522633 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:13.522638 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:13.522643 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:13.522648 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:13.522671 37762 retry.go:31] will retry after 2.893940025s: missing components: kube-dns
I1220 02:14:16.427761 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:16.427804 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:16.427822 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:16.427834 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:16.427841 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:16.427847 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:16.427857 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:16.427863 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:16.427876 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:16.427881 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:16.427898 37762 retry.go:31] will retry after 5.028189083s: missing components: kube-dns
W1220 02:14:13.671217 37878 node_ready.go:57] node "custom-flannel-503505" has "Ready":"False" status (will retry)
W1220 02:14:16.172759 37878 node_ready.go:57] node "custom-flannel-503505" has "Ready":"False" status (will retry)
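
node_ready.go above is polling the node object until its NodeReady condition flips to True. A hedged client-go sketch of that single check (the function name and surrounding loop are mine, not minikube's):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// nodeIsReady reports whether the named node's NodeReady condition is True,
// which is what the `node "..." has "Ready":"False"` retries wait on.
func nodeIsReady(cs *kubernetes.Clientset, name string) (bool, error) {
	node, err := cs.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, c := range node.Status.Conditions {
		if c.Type == corev1.NodeReady {
			return c.Status == corev1.ConditionTrue, nil
		}
	}
	return false, nil
}
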
I1220 02:14:13.683279 38979 main.go:144] libmachine: SSH cmd err, output: <nil>:
I1220 02:14:13.683320 38979 buildroot.go:172] set auth options {CertDir:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube CaCertPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca.pem CaPrivateKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/server.pem ServerKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/server-key.pem ClientKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube}
I1220 02:14:13.683376 38979 buildroot.go:174] setting up certificates
I1220 02:14:13.683393 38979 provision.go:84] configureAuth start
I1220 02:14:13.687478 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.688091 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.688126 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.691975 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.692656 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.692715 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.692969 38979 provision.go:143] copyHostCerts
I1220 02:14:13.693049 38979 exec_runner.go:144] found /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/ca.pem, removing ...
I1220 02:14:13.693064 38979 exec_runner.go:203] rm: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/ca.pem
I1220 02:14:13.693154 38979 exec_runner.go:151] cp: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca.pem --> /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/ca.pem (1082 bytes)
I1220 02:14:13.693360 38979 exec_runner.go:144] found /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cert.pem, removing ...
I1220 02:14:13.693377 38979 exec_runner.go:203] rm: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cert.pem
I1220 02:14:13.693441 38979 exec_runner.go:151] cp: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/cert.pem --> /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cert.pem (1127 bytes)
I1220 02:14:13.693548 38979 exec_runner.go:144] found /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/key.pem, removing ...
I1220 02:14:13.693560 38979 exec_runner.go:203] rm: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/key.pem
I1220 02:14:13.693612 38979 exec_runner.go:151] cp: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/key.pem --> /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/key.pem (1675 bytes)
I1220 02:14:13.693705 38979 provision.go:117] generating server cert: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/server.pem ca-key=/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca.pem private-key=/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca-key.pem org=minitest.false-503505 san=[127.0.0.1 192.168.61.177 false-503505 localhost minikube]
I1220 02:14:13.709086 38979 provision.go:177] copyRemoteCerts
I1220 02:14:13.709144 38979 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1220 02:14:13.713124 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.713703 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.713755 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.713967 38979 sshutil.go:53] new ssh client: &{IP:192.168.61.177 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/id_rsa Username:docker}
I1220 02:14:13.809584 38979 ssh_runner.go:362] scp /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1220 02:14:13.845246 38979 ssh_runner.go:362] scp /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I1220 02:14:13.881465 38979 ssh_runner.go:362] scp /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1220 02:14:13.915284 38979 provision.go:87] duration metric: took 231.876161ms to configureAuth
I1220 02:14:13.915334 38979 buildroot.go:189] setting minikube options for container-runtime
I1220 02:14:13.915608 38979 config.go:182] Loaded profile config "false-503505": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
I1220 02:14:13.919150 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.919807 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.919851 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.920156 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:13.920492 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:13.920559 38979 main.go:144] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1220 02:14:14.043505 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: tmpfs
I1220 02:14:14.043553 38979 buildroot.go:70] root file system type: tmpfs
I1220 02:14:14.043717 38979 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1220 02:14:14.047676 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:14.048130 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:14.048163 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:14.048457 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:14.048704 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:14.048784 38979 main.go:144] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1220 02:14:14.192756 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1220 02:14:14.196528 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:14.197071 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:14.197103 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:14.197379 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:14.197658 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:14.197687 38979 main.go:144] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1220 02:14:15.322369 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1220 02:14:15.322395 38979 machine.go:97] duration metric: took 2.032605943s to provisionDockerMachine
I1220 02:14:15.322407 38979 client.go:176] duration metric: took 21.59897051s to LocalClient.Create
I1220 02:14:15.322422 38979 start.go:167] duration metric: took 21.599041943s to libmachine.API.Create "false-503505"
I1220 02:14:15.322430 38979 start.go:293] postStartSetup for "false-503505" (driver="kvm2")
I1220 02:14:15.322443 38979 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1220 02:14:15.322513 38979 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1220 02:14:15.325726 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.326187 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.326227 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.326423 38979 sshutil.go:53] new ssh client: &{IP:192.168.61.177 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/id_rsa Username:docker}
I1220 02:14:15.421695 38979 ssh_runner.go:195] Run: cat /etc/os-release
I1220 02:14:15.426952 38979 info.go:137] Remote host: Buildroot 2025.02
I1220 02:14:15.426987 38979 filesync.go:126] Scanning /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/addons for local assets ...
I1220 02:14:15.427077 38979 filesync.go:126] Scanning /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/files for local assets ...
I1220 02:14:15.427228 38979 filesync.go:149] local asset: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/files/etc/ssl/certs/130182.pem -> 130182.pem in /etc/ssl/certs
I1220 02:14:15.427399 38979 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1220 02:14:15.440683 38979 ssh_runner.go:362] scp /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/files/etc/ssl/certs/130182.pem --> /etc/ssl/certs/130182.pem (1708 bytes)
I1220 02:14:15.472751 38979 start.go:296] duration metric: took 150.304753ms for postStartSetup
I1220 02:14:15.476375 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.476839 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.476864 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.477147 38979 profile.go:143] Saving config to /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/false-503505/config.json ...
I1220 02:14:15.477371 38979 start.go:128] duration metric: took 21.756169074s to createHost
I1220 02:14:15.480134 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.480583 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.480606 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.480814 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:15.481047 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:15.481060 38979 main.go:144] libmachine: About to run SSH command:
date +%s.%N
I1220 02:14:15.603682 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: 1766196855.575822881
I1220 02:14:15.603714 38979 fix.go:216] guest clock: 1766196855.575822881
I1220 02:14:15.603726 38979 fix.go:229] Guest: 2025-12-20 02:14:15.575822881 +0000 UTC Remote: 2025-12-20 02:14:15.477389482 +0000 UTC m=+21.885083527 (delta=98.433399ms)
I1220 02:14:15.603749 38979 fix.go:200] guest clock delta is within tolerance: 98.433399ms
I1220 02:14:15.603770 38979 start.go:83] releasing machines lock for "false-503505", held for 21.882663608s
I1220 02:14:15.607369 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.607986 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.608024 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.608687 38979 ssh_runner.go:195] Run: cat /version.json
I1220 02:14:15.608792 38979 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1220 02:14:15.612782 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.613294 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.613342 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.613436 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.613556 38979 sshutil.go:53] new ssh client: &{IP:192.168.61.177 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/id_rsa Username:docker}
I1220 02:14:15.614074 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.614107 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.614392 38979 sshutil.go:53] new ssh client: &{IP:192.168.61.177 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/id_rsa Username:docker}
I1220 02:14:15.700660 38979 ssh_runner.go:195] Run: systemctl --version
I1220 02:14:15.725011 38979 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1220 02:14:15.731935 38979 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1220 02:14:15.732099 38979 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *bridge* -not -name *podman* -not -name *.mk_disabled -printf "%p, " -exec sh -c "sudo sed -i -r -e '/"dst": ".*:.*"/d' -e 's|^(.*)"dst": (.*)[,*]$|\1"dst": \2|g' -e '/"subnet": ".*:.*"/d' -e 's|^(.*)"subnet": ".*"(.*)[,*]$|\1"subnet": "10.244.0.0/16"\2|g' {}" ;
I1220 02:14:15.744444 38979 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *podman* -not -name *.mk_disabled -printf "%p, " -exec sh -c "sudo sed -i -r -e 's|^(.*)"subnet": ".*"(.*)$|\1"subnet": "10.244.0.0/16"\2|g' -e 's|^(.*)"gateway": ".*"(.*)$|\1"gateway": "10.244.0.1"\2|g' {}" ;
I1220 02:14:15.768292 38979 cni.go:308] configured [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1220 02:14:15.768338 38979 start.go:496] detecting cgroup driver to use...
I1220 02:14:15.768490 38979 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1220 02:14:15.808234 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1220 02:14:15.830328 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1220 02:14:15.848439 38979 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1220 02:14:15.848537 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1220 02:14:15.865682 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1220 02:14:15.887500 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1220 02:14:15.906005 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1220 02:14:15.925461 38979 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1220 02:14:15.940692 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1220 02:14:15.959326 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1220 02:14:15.978291 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1220 02:14:15.997878 38979 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1220 02:14:16.014027 38979 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1220 02:14:16.014121 38979 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1220 02:14:16.033465 38979 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1220 02:14:16.050354 38979 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:16.231792 38979 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1220 02:14:16.289416 38979 start.go:496] detecting cgroup driver to use...
I1220 02:14:16.289528 38979 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1220 02:14:16.314852 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1220 02:14:16.343915 38979 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1220 02:14:16.373499 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1220 02:14:16.393749 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1220 02:14:16.415218 38979 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1220 02:14:16.448678 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1220 02:14:16.471638 38979 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1220 02:14:16.499850 38979 ssh_runner.go:195] Run: which cri-dockerd
I1220 02:14:16.505358 38979 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1220 02:14:16.518773 38979 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1220 02:14:16.542267 38979 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1220 02:14:16.744157 38979 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1220 02:14:16.924495 38979 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1220 02:14:16.924658 38979 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1220 02:14:16.953858 38979 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1220 02:14:16.973889 38979 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:17.180489 38979 ssh_runner.go:195] Run: sudo systemctl restart docker
I1220 02:14:17.720891 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1220 02:14:17.740432 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1220 02:14:17.756728 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1220 02:14:17.780803 38979 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1220 02:14:17.958835 38979 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1220 02:14:18.121422 38979 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:18.283915 38979 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1220 02:14:18.319068 38979 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1220 02:14:18.334630 38979 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:18.486080 38979 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1220 02:14:18.616715 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1220 02:14:18.643324 38979 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1220 02:14:18.643397 38979 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1220 02:14:18.649921 38979 start.go:564] Will wait 60s for crictl version
I1220 02:14:18.649987 38979 ssh_runner.go:195] Run: which crictl
I1220 02:14:18.655062 38979 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1220 02:14:18.692451 38979 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.2
RuntimeApiVersion: v1
I1220 02:14:18.692517 38979 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1220 02:14:18.725655 38979 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
==> Docker <==
Dec 20 02:13:25 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:13:25Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/24384c9b6386768f183a17a14b0915b4c06115ceca79b379c9a8caeb87ac9be2/resolv.conf as [nameserver 10.96.0.10 search kubernetes-dashboard.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Dec 20 02:13:26 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:26.077359482Z" level=warning msg="reference for unknown type: " digest="sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93" remote="docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93"
Dec 20 02:13:33 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:13:33Z" level=info msg="Stop pulling image docker.io/kubernetesui/dashboard:v2.7.0@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93: Status: Downloaded newer image for kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.166995649Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.247637303Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.247742747Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Dec 20 02:13:33 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:13:33Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.870943978Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.870972001Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.874954248Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.875104860Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 20 02:13:46 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:46.013938388Z" level=error msg="Handler for POST /v1.51/containers/e389ed009c41/pause returned error: cannot pause container e389ed009c414813f08a16331049a1f7b81ae99102e1d3eee00456652f70d78e: OCI runtime pause failed: container not running"
Dec 20 02:13:46 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:46.096234565Z" level=info msg="ignoring event" container=e389ed009c414813f08a16331049a1f7b81ae99102e1d3eee00456652f70d78e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 20 02:14:19 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:14:19Z" level=error msg="error getting RW layer size for container ID 'f14a7d35a9c218a36064019d8d70cd5e2dc10c8fff7e745b9c07943ea6e37833': Error response from daemon: No such container: f14a7d35a9c218a36064019d8d70cd5e2dc10c8fff7e745b9c07943ea6e37833"
Dec 20 02:14:19 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:14:19Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'f14a7d35a9c218a36064019d8d70cd5e2dc10c8fff7e745b9c07943ea6e37833'"
Dec 20 02:14:20 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:14:20Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-j9fnc_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"c17f03aae9a804c2000dd7a7f2df0a5c0e11cb7cc45d2898ceeb917e335ab8a6\""
Dec 20 02:14:20 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:14:20Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.054814750Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.173663258Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.173805989Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Dec 20 02:14:21 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:14:21Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.210054061Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.210106510Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.216155700Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.216230216Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
db82439a82773 6e38f40d628db 1 second ago Running storage-provisioner 2 b98cac4df9b58 storage-provisioner kube-system
3d0dc5e4eaf53 kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93 48 seconds ago Running kubernetes-dashboard 0 c7214caee965e kubernetes-dashboard-855c9754f9-v5f62 kubernetes-dashboard
bd3af300e51d6 56cc512116c8f 58 seconds ago Running busybox 1 620275c9345e0 busybox default
c9a7560c3855f 52546a367cc9e 58 seconds ago Running coredns 1 bd05cab39e53f coredns-66bc5c9577-gjmjk kube-system
e389ed009c414 6e38f40d628db About a minute ago Exited storage-provisioner 1 b98cac4df9b58 storage-provisioner kube-system
8a1598184096c 36eef8e07bdd6 About a minute ago Running kube-proxy 1 fceaaba1c1db3 kube-proxy-22tlj kube-system
2808d78b661f8 aec12dadf56dd About a minute ago Running kube-scheduler 1 6d3fddf7afe4b kube-scheduler-default-k8s-diff-port-032958 kube-system
5d487135b34c5 a3e246e9556e9 About a minute ago Running etcd 1 57ad4b77ed607 etcd-default-k8s-diff-port-032958 kube-system
0be7d44211125 5826b25d990d7 About a minute ago Running kube-controller-manager 1 f7e02a8a528fa kube-controller-manager-default-k8s-diff-port-032958 kube-system
799ae6e77e4dc aa27095f56193 About a minute ago Running kube-apiserver 1 c0277aff9f306 kube-apiserver-default-k8s-diff-port-032958 kube-system
9a4671ba050b2 gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 2 minutes ago Exited busybox 0 9bfc558dcff48 busybox default
aef0cd5a3775d 52546a367cc9e 2 minutes ago Exited coredns 0 4e8574a6b885b coredns-66bc5c9577-gjmjk kube-system
696c72bae65f2 36eef8e07bdd6 2 minutes ago Exited kube-proxy 0 959487a2071a7 kube-proxy-22tlj kube-system
37cee352777b9 aa27095f56193 3 minutes ago Exited kube-apiserver 0 042ea7540f943 kube-apiserver-default-k8s-diff-port-032958 kube-system
6955eb7dbb7a8 a3e246e9556e9 3 minutes ago Exited etcd 0 1ae4fd44c2900 etcd-default-k8s-diff-port-032958 kube-system
bc3e91d6c19d6 5826b25d990d7 3 minutes ago Exited kube-controller-manager 0 ec2c7b618f7f7 kube-controller-manager-default-k8s-diff-port-032958 kube-system
44fb178dfab72 aec12dadf56dd 3 minutes ago Exited kube-scheduler 0 010ff1a843791 kube-scheduler-default-k8s-diff-port-032958 kube-system
==> coredns [aef0cd5a3775] <==
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] Reloading
[INFO] plugin/reload: Running configuration SHA512 = ecad3ac8c72227dcf0d7a418ea5051ee155dd74d241a13c4787cc61906568517b5647c8519c78ef2c6b724422ee4b03d6cfb27e9a87140163726e83184faf782
[INFO] Reloading complete
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [c9a7560c3855] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = ecad3ac8c72227dcf0d7a418ea5051ee155dd74d241a13c4787cc61906568517b5647c8519c78ef2c6b724422ee4b03d6cfb27e9a87140163726e83184faf782
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:39548 - 58159 "HINFO IN 6794078486954714189.4770737732440681574. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.045655293s
==> describe nodes <==
Name: default-k8s-diff-port-032958
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=default-k8s-diff-port-032958
kubernetes.io/os=linux
minikube.k8s.io/commit=7cd9f41b7421760cf1f1eaa8725bdb975037b06d
minikube.k8s.io/name=default-k8s-diff-port-032958
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_12_20T02_11_24_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 20 Dec 2025 02:11:20 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: default-k8s-diff-port-032958
AcquireTime: <unset>
RenewTime: Sat, 20 Dec 2025 02:14:19 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sat, 20 Dec 2025 02:14:20 +0000 Sat, 20 Dec 2025 02:11:18 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sat, 20 Dec 2025 02:14:20 +0000 Sat, 20 Dec 2025 02:11:18 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sat, 20 Dec 2025 02:14:20 +0000 Sat, 20 Dec 2025 02:11:18 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sat, 20 Dec 2025 02:14:20 +0000 Sat, 20 Dec 2025 02:13:24 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.83.139
Hostname: default-k8s-diff-port-032958
Capacity:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
System Info:
Machine ID: a22ece73f0a74620b511d2c9063270d7
System UUID: a22ece73-f0a7-4620-b511-d2c9063270d7
Boot ID: 3a1ecf6e-4165-4ac3-94cb-43972902c57c
Kernel Version: 6.6.95
OS Image: Buildroot 2025.02
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.5.2
Kubelet Version: v1.34.3
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m17s
kube-system coredns-66bc5c9577-gjmjk 100m (5%) 0 (0%) 70Mi (2%) 170Mi (5%) 2m53s
kube-system etcd-default-k8s-diff-port-032958 100m (5%) 0 (0%) 100Mi (3%) 0 (0%) 2m57s
kube-system kube-apiserver-default-k8s-diff-port-032958 250m (12%) 0 (0%) 0 (0%) 0 (0%) 2m58s
kube-system kube-controller-manager-default-k8s-diff-port-032958 200m (10%) 0 (0%) 0 (0%) 0 (0%) 2m57s
kube-system kube-proxy-22tlj 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m53s
kube-system kube-scheduler-default-k8s-diff-port-032958 100m (5%) 0 (0%) 0 (0%) 0 (0%) 2m57s
kube-system metrics-server-746fcd58dc-r9hzl 100m (5%) 0 (0%) 200Mi (6%) 0 (0%) 2m7s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m50s
kubernetes-dashboard dashboard-metrics-scraper-6ffb444bf9-wzcc7 0 (0%) 0 (0%) 0 (0%) 0 (0%) 62s
kubernetes-dashboard kubernetes-dashboard-855c9754f9-v5f62 0 (0%) 0 (0%) 0 (0%) 0 (0%) 62s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 0 (0%)
memory 370Mi (12%) 170Mi (5%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 2m51s kube-proxy
Normal Starting 65s kube-proxy
Normal NodeAllocatableEnforced 3m5s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 3m5s (x8 over 3m5s) kubelet Node default-k8s-diff-port-032958 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 3m5s (x8 over 3m5s) kubelet Node default-k8s-diff-port-032958 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 3m5s (x7 over 3m5s) kubelet Node default-k8s-diff-port-032958 status is now: NodeHasSufficientPID
Normal Starting 3m5s kubelet Starting kubelet.
Normal Starting 2m58s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 2m58s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 2m57s kubelet Node default-k8s-diff-port-032958 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m57s kubelet Node default-k8s-diff-port-032958 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 2m57s kubelet Node default-k8s-diff-port-032958 status is now: NodeHasSufficientPID
Normal RegisteredNode 2m54s node-controller Node default-k8s-diff-port-032958 event: Registered Node default-k8s-diff-port-032958 in Controller
Normal NodeReady 2m53s kubelet Node default-k8s-diff-port-032958 status is now: NodeReady
Normal Starting 72s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 72s (x8 over 72s) kubelet Node default-k8s-diff-port-032958 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 72s (x8 over 72s) kubelet Node default-k8s-diff-port-032958 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 72s (x7 over 72s) kubelet Node default-k8s-diff-port-032958 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 72s kubelet Updated Node Allocatable limit across pods
Warning Rebooted 67s kubelet Node default-k8s-diff-port-032958 has been rebooted, boot id: 3a1ecf6e-4165-4ac3-94cb-43972902c57c
Normal RegisteredNode 63s node-controller Node default-k8s-diff-port-032958 event: Registered Node default-k8s-diff-port-032958 in Controller
Normal Starting 2s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 1s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 1s kubelet Node default-k8s-diff-port-032958 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 1s kubelet Node default-k8s-diff-port-032958 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 1s kubelet Node default-k8s-diff-port-032958 status is now: NodeHasSufficientPID
==> dmesg <==
[Dec20 02:12] Booted with the nomodeset parameter. Only the system framebuffer will be available
[ +0.000011] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.000038] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +0.003240] (rpcbind)[120]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
[ +0.994669] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
[ +0.000027] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
[ +0.000002] NFSD: Unable to initialize client recovery tracking! (-2)
[ +0.151734] kauditd_printk_skb: 1 callbacks suppressed
[Dec20 02:13] kauditd_printk_skb: 393 callbacks suppressed
[ +0.106540] kauditd_printk_skb: 46 callbacks suppressed
[ +5.723521] kauditd_printk_skb: 165 callbacks suppressed
[ +3.591601] kauditd_printk_skb: 134 callbacks suppressed
[ +0.607561] kauditd_printk_skb: 259 callbacks suppressed
[ +0.307946] kauditd_printk_skb: 17 callbacks suppressed
[Dec20 02:14] kauditd_printk_skb: 35 callbacks suppressed
==> etcd [5d487135b34c] <==
{"level":"warn","ts":"2025-12-20T02:13:13.342109Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50660","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.352027Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50684","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.369107Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50704","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.385882Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50724","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.394142Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50748","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.401109Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50766","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.409984Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50786","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.418468Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50796","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.428572Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50810","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.434912Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50852","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.445522Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50868","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.454332Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50884","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.465435Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50906","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.483111Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50914","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.490648Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50946","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.499438Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50956","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.572659Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50980","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:29.160598Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"521.564224ms","expected-duration":"100ms","prefix":"","request":"header:<ID:13052446816451747392 > lease_revoke:<id:35239b39868e0a7a>","response":"size:28"}
{"level":"info","ts":"2025-12-20T02:13:29.161474Z","caller":"traceutil/trace.go:172","msg":"trace[1265080339] linearizableReadLoop","detail":"{readStateIndex:772; appliedIndex:771; }","duration":"411.486582ms","start":"2025-12-20T02:13:28.749972Z","end":"2025-12-20T02:13:29.161458Z","steps":["trace[1265080339] 'read index received' (duration: 33.594µs)","trace[1265080339] 'applied index is now lower than readState.Index' (duration: 411.451844ms)"],"step_count":2}
{"level":"warn","ts":"2025-12-20T02:13:29.161591Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"411.631732ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-12-20T02:13:29.161613Z","caller":"traceutil/trace.go:172","msg":"trace[1600534378] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:727; }","duration":"411.662139ms","start":"2025-12-20T02:13:28.749943Z","end":"2025-12-20T02:13:29.161605Z","steps":["trace[1600534378] 'agreement among raft nodes before linearized reading' (duration: 411.61436ms)"],"step_count":1}
{"level":"warn","ts":"2025-12-20T02:13:29.162719Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"311.7284ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods/kube-system/kube-scheduler-default-k8s-diff-port-032958\" limit:1 ","response":"range_response_count:1 size:5168"}
{"level":"info","ts":"2025-12-20T02:13:29.163046Z","caller":"traceutil/trace.go:172","msg":"trace[1971843700] range","detail":"{range_begin:/registry/pods/kube-system/kube-scheduler-default-k8s-diff-port-032958; range_end:; response_count:1; response_revision:727; }","duration":"312.095123ms","start":"2025-12-20T02:13:28.850939Z","end":"2025-12-20T02:13:29.163034Z","steps":["trace[1971843700] 'agreement among raft nodes before linearized reading' (duration: 311.117462ms)"],"step_count":1}
{"level":"warn","ts":"2025-12-20T02:13:29.163083Z","caller":"v3rpc/interceptor.go:202","msg":"request stats","start time":"2025-12-20T02:13:28.850904Z","time spent":"312.166241ms","remote":"127.0.0.1:50178","response type":"/etcdserverpb.KV/Range","request count":0,"request size":74,"response count":1,"response size":5191,"request content":"key:\"/registry/pods/kube-system/kube-scheduler-default-k8s-diff-port-032958\" limit:1 "}
{"level":"info","ts":"2025-12-20T02:13:30.222290Z","caller":"traceutil/trace.go:172","msg":"trace[252289306] transaction","detail":"{read_only:false; response_revision:728; number_of_response:1; }","duration":"269.402974ms","start":"2025-12-20T02:13:29.952867Z","end":"2025-12-20T02:13:30.222270Z","steps":["trace[252289306] 'process raft request' (duration: 269.235053ms)"],"step_count":1}
==> etcd [6955eb7dbb7a] <==
{"level":"warn","ts":"2025-12-20T02:11:19.847959Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60060","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:11:19.949918Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60078","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-12-20T02:12:07.244895Z","caller":"traceutil/trace.go:172","msg":"trace[798266364] linearizableReadLoop","detail":"{readStateIndex:511; appliedIndex:511; }","duration":"213.060794ms","start":"2025-12-20T02:12:07.031794Z","end":"2025-12-20T02:12:07.244854Z","steps":["trace[798266364] 'read index received' (duration: 213.055389ms)","trace[798266364] 'applied index is now lower than readState.Index' (duration: 4.109µs)"],"step_count":2}
{"level":"info","ts":"2025-12-20T02:12:07.245037Z","caller":"traceutil/trace.go:172","msg":"trace[286472680] transaction","detail":"{read_only:false; response_revision:494; number_of_response:1; }","duration":"297.72601ms","start":"2025-12-20T02:12:06.947300Z","end":"2025-12-20T02:12:07.245026Z","steps":["trace[286472680] 'process raft request' (duration: 297.578574ms)"],"step_count":1}
{"level":"warn","ts":"2025-12-20T02:12:07.245042Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"213.193567ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-12-20T02:12:07.245100Z","caller":"traceutil/trace.go:172","msg":"trace[1257312239] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:493; }","duration":"213.303974ms","start":"2025-12-20T02:12:07.031787Z","end":"2025-12-20T02:12:07.245091Z","steps":["trace[1257312239] 'agreement among raft nodes before linearized reading' (duration: 213.173447ms)"],"step_count":1}
{"level":"warn","ts":"2025-12-20T02:12:08.425636Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"189.814071ms","expected-duration":"100ms","prefix":"","request":"header:<ID:13052446816422726242 > lease_revoke:<id:35239b39868e09cb>","response":"size:28"}
{"level":"info","ts":"2025-12-20T02:12:15.681646Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
{"level":"info","ts":"2025-12-20T02:12:15.681764Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"default-k8s-diff-port-032958","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.139:2380"],"advertise-client-urls":["https://192.168.83.139:2379"]}
{"level":"error","ts":"2025-12-20T02:12:15.681878Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-12-20T02:12:22.684233Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-12-20T02:12:22.686809Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-20T02:12:22.686860Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"911810311894b523","current-leader-member-id":"911810311894b523"}
{"level":"info","ts":"2025-12-20T02:12:22.687961Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
{"level":"info","ts":"2025-12-20T02:12:22.688006Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
{"level":"warn","ts":"2025-12-20T02:12:22.691490Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"warn","ts":"2025-12-20T02:12:22.691626Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"error","ts":"2025-12-20T02:12:22.691850Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"warn","ts":"2025-12-20T02:12:22.692143Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.83.139:2379: use of closed network connection"}
{"level":"warn","ts":"2025-12-20T02:12:22.692250Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.83.139:2379: use of closed network connection"}
{"level":"error","ts":"2025-12-20T02:12:22.692290Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.139:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-20T02:12:22.695968Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.83.139:2380"}
{"level":"error","ts":"2025-12-20T02:12:22.696039Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.139:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-20T02:12:22.696144Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.83.139:2380"}
{"level":"info","ts":"2025-12-20T02:12:22.696154Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"default-k8s-diff-port-032958","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.139:2380"],"advertise-client-urls":["https://192.168.83.139:2379"]}
==> kernel <==
02:14:21 up 1 min, 0 users, load average: 0.75, 0.36, 0.13
Linux default-k8s-diff-port-032958 6.6.95 #1 SMP PREEMPT_DYNAMIC Wed Dec 17 12:49:57 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2025.02"
==> kube-apiserver [37cee352777b] <==
W1220 02:12:24.853572 1 logging.go:55] [core] [Channel #191 SubChannel #193]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:24.853799 1 logging.go:55] [core] [Channel #227 SubChannel #229]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:24.886374 1 logging.go:55] [core] [Channel #87 SubChannel #89]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:24.979017 1 logging.go:55] [core] [Channel #9 SubChannel #11]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.000084 1 logging.go:55] [core] [Channel #207 SubChannel #209]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.002681 1 logging.go:55] [core] [Channel #251 SubChannel #253]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.055296 1 logging.go:55] [core] [Channel #219 SubChannel #221]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.076421 1 logging.go:55] [core] [Channel #75 SubChannel #77]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.117381 1 logging.go:55] [core] [Channel #115 SubChannel #117]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.121093 1 logging.go:55] [core] [Channel #183 SubChannel #185]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.174493 1 logging.go:55] [core] [Channel #139 SubChannel #141]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.192366 1 logging.go:55] [core] [Channel #147 SubChannel #149]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.290406 1 logging.go:55] [core] [Channel #262 SubChannel #263]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.339079 1 logging.go:55] [core] [Channel #103 SubChannel #105]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.360110 1 logging.go:55] [core] [Channel #131 SubChannel #133]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.370079 1 logging.go:55] [core] [Channel #111 SubChannel #113]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.387509 1 logging.go:55] [core] [Channel #199 SubChannel #201]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.392242 1 logging.go:55] [core] [Channel #39 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.418164 1 logging.go:55] [core] [Channel #91 SubChannel #93]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.463768 1 logging.go:55] [core] [Channel #119 SubChannel #121]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.491271 1 logging.go:55] [core] [Channel #231 SubChannel #233]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.551484 1 logging.go:55] [core] [Channel #4 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.672624 1 logging.go:55] [core] [Channel #171 SubChannel #173]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.708145 1 logging.go:55] [core] [Channel #203 SubChannel #205]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.738375 1 logging.go:55] [core] [Channel #211 SubChannel #213]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
==> kube-apiserver [799ae6e77e4d] <==
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1220 02:13:15.375295 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
E1220 02:13:15.375659 1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
I1220 02:13:15.376472 1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
I1220 02:13:16.429720 1 handler.go:285] Adding GroupVersion metrics.k8s.io v1beta1 to ResourceManager
I1220 02:13:17.061950 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1220 02:13:17.106313 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I1220 02:13:17.143082 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1220 02:13:17.149304 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1220 02:13:18.957068 1 controller.go:667] quota admission added evaluator for: endpoints
I1220 02:13:18.991435 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1220 02:13:19.164812 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I1220 02:13:19.310960 1 controller.go:667] quota admission added evaluator for: namespaces
I1220 02:13:19.735545 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.100.251.44"}
I1220 02:13:19.755566 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.100.131.142"}
W1220 02:14:18.888986 1 handler_proxy.go:99] no RequestInfo found in the context
E1220 02:14:18.889066 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1220 02:14:18.889082 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
W1220 02:14:18.898096 1 handler_proxy.go:99] no RequestInfo found in the context
E1220 02:14:18.898161 1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
I1220 02:14:18.898178 1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
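Both the 503 and the "could not get list of group versions" errors trace back to the aggregated v1beta1.metrics.k8s.io APIService: metrics-server never becomes ready (its image pull fails further down in the kubelet log), so the OpenAPI aggregator keeps requeueing. A quick check of the APIService condition, assuming kubectl still has this profile's context:
kubectl --context default-k8s-diff-port-032958 get apiservice v1beta1.metrics.k8s.io
# Show the Available condition message the aggregator reports
kubectl --context default-k8s-diff-port-032958 get apiservice v1beta1.metrics.k8s.io -o jsonpath='{.status.conditions[?(@.type=="Available")].message}'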
==> kube-controller-manager [0be7d4421112] <==
I1220 02:13:18.949472 1 shared_informer.go:356] "Caches are synced" controller="PV protection"
I1220 02:13:18.949789 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I1220 02:13:18.911345 1 shared_informer.go:356] "Caches are synced" controller="GC"
I1220 02:13:18.951606 1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
I1220 02:13:18.954539 1 shared_informer.go:356] "Caches are synced" controller="job"
I1220 02:13:18.928851 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1220 02:13:18.929424 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I1220 02:13:18.967844 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1220 02:13:18.972782 1 shared_informer.go:356] "Caches are synced" controller="stateful set"
I1220 02:13:18.972915 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1220 02:13:18.972963 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1220 02:13:18.972974 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1220 02:13:18.978649 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1220 02:13:19.003577 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
E1220 02:13:19.507058 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.557712 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.579564 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.584826 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.605680 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.607173 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.612896 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.619443 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
I1220 02:13:28.942844 1 node_lifecycle_controller.go:1044] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
E1220 02:14:18.972742 1 resource_quota_controller.go:446] "Unhandled Error" err="unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: stale GroupVersion discovery: metrics.k8s.io/v1beta1" logger="UnhandledError"
I1220 02:14:19.019968 1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
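The burst of 'serviceaccount "kubernetes-dashboard" not found' errors is a creation-order race: the ReplicaSet controller tries to stamp out dashboard pods before the addon's ServiceAccount exists, and the errors stop once it does (the dashboard pod is serving by 02:13:33 in its own log below). A hedged way to confirm the ServiceAccount and workloads after the fact, assuming this profile's kubectl context:
kubectl --context default-k8s-diff-port-032958 -n kubernetes-dashboard get serviceaccount kubernetes-dashboard
kubectl --context default-k8s-diff-port-032958 -n kubernetes-dashboard get deployments,replicasets,pods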
==> kube-controller-manager [bc3e91d6c19d] <==
I1220 02:11:27.795191 1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
I1220 02:11:27.795197 1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
I1220 02:11:27.795206 1 shared_informer.go:356] "Caches are synced" controller="endpoint"
I1220 02:11:27.795417 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I1220 02:11:27.804009 1 shared_informer.go:356] "Caches are synced" controller="service-cidr-controller"
I1220 02:11:27.809262 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="default-k8s-diff-port-032958" podCIDRs=["10.244.0.0/24"]
I1220 02:11:27.814727 1 shared_informer.go:356] "Caches are synced" controller="PV protection"
I1220 02:11:27.816005 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1220 02:11:27.818197 1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
I1220 02:11:27.827800 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1220 02:11:27.835424 1 shared_informer.go:356] "Caches are synced" controller="taint"
I1220 02:11:27.835598 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I1220 02:11:27.835777 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="default-k8s-diff-port-032958"
I1220 02:11:27.835796 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-serving"
I1220 02:11:27.835833 1 node_lifecycle_controller.go:1025] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller"
I1220 02:11:27.835932 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1220 02:11:27.835939 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1220 02:11:27.835945 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1220 02:11:27.838763 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
I1220 02:11:27.838905 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-client"
I1220 02:11:27.838920 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I1220 02:11:27.843443 1 shared_informer.go:356] "Caches are synced" controller="persistent volume"
I1220 02:11:27.845334 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I1220 02:11:27.853111 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1220 02:11:32.836931 1 node_lifecycle_controller.go:1044] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
==> kube-proxy [696c72bae65f] <==
I1220 02:11:30.533772 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1220 02:11:30.634441 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1220 02:11:30.634675 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.139"]
E1220 02:11:30.635231 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1220 02:11:30.763679 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1220 02:11:30.764391 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1220 02:11:30.764588 1 server_linux.go:132] "Using iptables Proxier"
I1220 02:11:30.801765 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1220 02:11:30.802104 1 server.go:527] "Version info" version="v1.34.3"
I1220 02:11:30.802116 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1220 02:11:30.821963 1 config.go:309] "Starting node config controller"
I1220 02:11:30.822050 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1220 02:11:30.822061 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1220 02:11:30.826798 1 config.go:200] "Starting service config controller"
I1220 02:11:30.826954 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1220 02:11:30.829847 1 config.go:106] "Starting endpoint slice config controller"
I1220 02:11:30.830754 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1220 02:11:30.831058 1 config.go:403] "Starting serviceCIDR config controller"
I1220 02:11:30.831070 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1220 02:11:30.937234 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1220 02:11:30.937307 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1220 02:11:30.933586 1 shared_informer.go:356] "Caches are synced" controller="service config"
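The "No iptables support for family IPv6" warning above only means the ip6table_nat kernel module is not loaded in the guest, so kube-proxy proceeds in single-stack IPv4 mode; it is unrelated to the test failure. A sketch for confirming that from the host, reusing this profile's ssh access:
out/minikube-linux-amd64 -p default-k8s-diff-port-032958 ssh "lsmod | grep -E 'ip6table_nat|ip6_tables' || echo 'IPv6 netfilter modules not loaded'"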
==> kube-proxy [8a1598184096] <==
I1220 02:13:16.000300 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1220 02:13:16.100699 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1220 02:13:16.100734 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.139"]
E1220 02:13:16.100793 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1220 02:13:16.145133 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1220 02:13:16.145459 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1220 02:13:16.145683 1 server_linux.go:132] "Using iptables Proxier"
I1220 02:13:16.156575 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1220 02:13:16.157810 1 server.go:527] "Version info" version="v1.34.3"
I1220 02:13:16.158021 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1220 02:13:16.162964 1 config.go:200] "Starting service config controller"
I1220 02:13:16.162999 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1220 02:13:16.163014 1 config.go:106] "Starting endpoint slice config controller"
I1220 02:13:16.163018 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1220 02:13:16.163027 1 config.go:403] "Starting serviceCIDR config controller"
I1220 02:13:16.163030 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1220 02:13:16.166161 1 config.go:309] "Starting node config controller"
I1220 02:13:16.166330 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1220 02:13:16.166459 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1220 02:13:16.263219 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1220 02:13:16.263310 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1220 02:13:16.263327 1 shared_informer.go:356] "Caches are synced" controller="service config"
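The E-level "Kube-proxy configuration may be incomplete or incorrect" line in both kube-proxy logs is advisory: with nodePortAddresses unset, NodePorts accept connections on every local IP. The effective setting lives in the kubeadm-managed kube-proxy ConfigMap; a sketch for inspecting it (and, if desired, setting nodePortAddresses to "primary", which recent Kubernetes releases accept) under this profile's context:
kubectl --context default-k8s-diff-port-032958 -n kube-system get configmap kube-proxy -o yaml | grep -n -A2 nodePortAddresses
# After editing the config.conf key in that ConfigMap, restart the daemonset so kube-proxy re-reads it
kubectl --context default-k8s-diff-port-032958 -n kube-system rollout restart daemonset kube-proxy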
==> kube-scheduler [2808d78b661f] <==
I1220 02:13:12.086051 1 serving.go:386] Generated self-signed cert in-memory
I1220 02:13:14.442280 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.3"
I1220 02:13:14.442329 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1220 02:13:14.455168 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController
I1220 02:13:14.455457 1 shared_informer.go:349] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController"
I1220 02:13:14.455686 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1220 02:13:14.455746 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1220 02:13:14.455761 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file"
I1220 02:13:14.455884 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file"
I1220 02:13:14.456412 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1220 02:13:14.456890 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1220 02:13:14.556446 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file"
I1220 02:13:14.556930 1 shared_informer.go:356] "Caches are synced" controller="RequestHeaderAuthRequestController"
I1220 02:13:14.557255 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kube-scheduler [44fb178dfab7] <==
E1220 02:11:20.961299 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E1220 02:11:20.964924 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1220 02:11:20.965293 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E1220 02:11:20.965512 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E1220 02:11:21.832650 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E1220 02:11:21.832947 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E1220 02:11:21.847314 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1220 02:11:21.848248 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1220 02:11:21.883915 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E1220 02:11:21.922925 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E1220 02:11:21.948354 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1220 02:11:21.988956 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1220 02:11:22.072320 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E1220 02:11:22.112122 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1220 02:11:22.125974 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1220 02:11:22.146170 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1220 02:11:22.171595 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1220 02:11:22.226735 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
I1220 02:11:25.144517 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1220 02:12:15.706842 1 secure_serving.go:259] Stopped listening on 127.0.0.1:10259
I1220 02:12:15.706898 1 server.go:263] "[graceful-termination] secure server has stopped listening"
I1220 02:12:15.706917 1 tlsconfig.go:258] "Shutting down DynamicServingCertificateController"
I1220 02:12:15.706972 1 configmap_cafile_content.go:226] "Shutting down controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1220 02:12:15.707164 1 server.go:265] "[graceful-termination] secure server is exiting"
E1220 02:12:15.707186 1 run.go:72] "command failed" err="finished without leader elect"
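The scheduler's "Failed to watch ... is forbidden" errors are confined to the first seconds after the API server comes up, before the built-in system:kube-scheduler RBAC bindings are reconciled; the caches sync at 02:11:25, and the final "finished without leader elect" error comes from the shutdown at 02:12:15, not from RBAC. If the forbidden errors had persisted, the bindings could be checked with impersonation (sketch, assuming this profile's context and cluster-admin access):
kubectl --context default-k8s-diff-port-032958 auth can-i list nodes --as=system:kube-scheduler
kubectl --context default-k8s-diff-port-032958 auth can-i list poddisruptionbudgets.policy --as=system:kube-scheduler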
==> kubelet <==
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.306089 4206 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="959487a2071a7d265b217d3aee2b7e4fbafb02bb0585f7ff40beae30aa17b725"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.330357 4206 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ae4fd44c29005031aebaf78608172fd0e41f69bee4dd72c3ea114e035fc7e8e"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.330548 4206 kubelet.go:3220] "Creating a mirror pod for static pod" pod="kube-system/etcd-default-k8s-diff-port-032958"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:20.342746 4206 kubelet.go:3222] "Failed creating a mirror pod" err="pods \"etcd-default-k8s-diff-port-032958\" already exists" pod="kube-system/etcd-default-k8s-diff-port-032958"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.617352 4206 apiserver.go:52] "Watching apiserver"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.683834 4206 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.742502 4206 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/07a41d99-89a6-4d25-b7cf-57f49fbdea5a-lib-modules\") pod \"kube-proxy-22tlj\" (UID: \"07a41d99-89a6-4d25-b7cf-57f49fbdea5a\") " pod="kube-system/kube-proxy-22tlj"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.743177 4206 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/07a41d99-89a6-4d25-b7cf-57f49fbdea5a-xtables-lock\") pod \"kube-proxy-22tlj\" (UID: \"07a41d99-89a6-4d25-b7cf-57f49fbdea5a\") " pod="kube-system/kube-proxy-22tlj"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.743223 4206 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/a74ca514-b136-40a6-9fd7-27c96e23bca7-tmp\") pod \"storage-provisioner\" (UID: \"a74ca514-b136-40a6-9fd7-27c96e23bca7\") " pod="kube-system/storage-provisioner"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.941330 4206 scope.go:117] "RemoveContainer" containerID="e389ed009c414813f08a16331049a1f7b81ae99102e1d3eee00456652f70d78e"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.187714 4206 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.188551 4206 kuberuntime_image.go:43] "Failed to pull image" err="Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.190144 4206 kuberuntime_manager.go:1449] "Unhandled Error" err="container dashboard-metrics-scraper start failed in pod dashboard-metrics-scraper-6ffb444bf9-wzcc7_kubernetes-dashboard(6951d269-7815-46e0-bfd0-c9dba02d7a47): ErrImagePull: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" logger="UnhandledError"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.191545 4206 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ErrImagePull: \"Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9-wzcc7" podUID="6951d269-7815-46e0-bfd0-c9dba02d7a47"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.218131 4206 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.218192 4206 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.218346 4206 kuberuntime_manager.go:1449] "Unhandled Error" err="container metrics-server start failed in pod metrics-server-746fcd58dc-r9hzl_kube-system(ea98af6d-2555-48e1-9403-91cdbace7b1c): ErrImagePull: Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" logger="UnhandledError"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.219866 4206 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"metrics-server\" with ErrImagePull: \"Error response from daemon: Get \\\"https://fake.domain/v2/\\\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host\"" pod="kube-system/metrics-server-746fcd58dc-r9hzl" podUID="ea98af6d-2555-48e1-9403-91cdbace7b1c"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:21.415555 4206 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f14a7d35a9c218a36064019d8d70cd5e2dc10c8fff7e745b9c07943ea6e37833"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:21.445678 4206 kubelet.go:3220] "Creating a mirror pod for static pod" pod="kube-system/etcd-default-k8s-diff-port-032958"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:21.445968 4206 kubelet.go:3220] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-default-k8s-diff-port-032958"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:21.446323 4206 kubelet.go:3220] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-default-k8s-diff-port-032958"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.476713 4206 kubelet.go:3222] "Failed creating a mirror pod" err="pods \"kube-scheduler-default-k8s-diff-port-032958\" already exists" pod="kube-system/kube-scheduler-default-k8s-diff-port-032958"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.478173 4206 kubelet.go:3222] "Failed creating a mirror pod" err="pods \"etcd-default-k8s-diff-port-032958\" already exists" pod="kube-system/etcd-default-k8s-diff-port-032958"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.479470 4206 kubelet.go:3222] "Failed creating a mirror pod" err="pods \"kube-apiserver-default-k8s-diff-port-032958\" already exists" pod="kube-system/kube-apiserver-default-k8s-diff-port-032958"
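Two unrelated image pulls fail in the kubelet log above: registry.k8s.io/echoserver:1.4 is rejected because current Docker releases have removed Image manifest v2 schema 1 support, and fake.domain/registry.k8s.io/echoserver:1.4 fails DNS, apparently by design (the metrics-server image is pointed at a bogus fake.domain registry). A sketch for confirming the first case from any machine with a docker client; note that newer clients may refuse the schema 1 manifest outright, which is itself the signal:
docker manifest inspect registry.k8s.io/echoserver:1.4 | head -n 5
# A schema 1 image reports "schemaVersion": 1; schema 2 / OCI images report 2 plus a mediaType.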
==> kubernetes-dashboard [3d0dc5e4eaf5] <==
2025/12/20 02:13:33 Using namespace: kubernetes-dashboard
2025/12/20 02:13:33 Using in-cluster config to connect to apiserver
2025/12/20 02:13:33 Using secret token for csrf signing
2025/12/20 02:13:33 Initializing csrf token from kubernetes-dashboard-csrf secret
2025/12/20 02:13:33 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
2025/12/20 02:13:33 Successful initial request to the apiserver, version: v1.34.3
2025/12/20 02:13:33 Generating JWE encryption key
2025/12/20 02:13:33 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
2025/12/20 02:13:33 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
2025/12/20 02:13:33 Initializing JWE encryption key from synchronized object
2025/12/20 02:13:33 Creating in-cluster Sidecar client
2025/12/20 02:13:33 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2025/12/20 02:13:33 Serving insecurely on HTTP port: 9090
2025/12/20 02:14:19 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2025/12/20 02:13:33 Starting overwatch
==> storage-provisioner [db82439a8277] <==
I1220 02:14:21.324067 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1220 02:14:21.372255 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1220 02:14:21.373462 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
W1220 02:14:21.382159 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
==> storage-provisioner [e389ed009c41] <==
I1220 02:13:15.862545 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F1220 02:13:45.872285 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
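The earlier storage-provisioner container dies with a fatal i/o timeout because it could not reach the in-cluster API VIP at 10.96.0.1:443 within roughly 30s while the control plane was still restarting; its replacement at 02:14:21 gets through and proceeds to leader election. The same reachability test can be run by hand from inside the guest, assuming curl is present in the image:
out/minikube-linux-amd64 -p default-k8s-diff-port-032958 ssh "curl -skm 5 https://10.96.0.1/version || echo 'apiserver service VIP unreachable'"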
-- /stdout --
helpers_test.go:263: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-032958 -n default-k8s-diff-port-032958
I1220 02:14:24.115175 13018 config.go:182] Loaded profile config "custom-flannel-503505": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
helpers_test.go:270: (dbg) Run: kubectl --context default-k8s-diff-port-032958 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:281: non-running pods: metrics-server-746fcd58dc-r9hzl dashboard-metrics-scraper-6ffb444bf9-wzcc7
helpers_test.go:283: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: describe non-running pods <======
helpers_test.go:286: (dbg) Run: kubectl --context default-k8s-diff-port-032958 describe pod metrics-server-746fcd58dc-r9hzl dashboard-metrics-scraper-6ffb444bf9-wzcc7
helpers_test.go:286: (dbg) Non-zero exit: kubectl --context default-k8s-diff-port-032958 describe pod metrics-server-746fcd58dc-r9hzl dashboard-metrics-scraper-6ffb444bf9-wzcc7: exit status 1 (88.623563ms)
** stderr **
Error from server (NotFound): pods "metrics-server-746fcd58dc-r9hzl" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-6ffb444bf9-wzcc7" not found
** /stderr **
helpers_test.go:288: kubectl --context default-k8s-diff-port-032958 describe pod metrics-server-746fcd58dc-r9hzl dashboard-metrics-scraper-6ffb444bf9-wzcc7: exit status 1
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:224: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: network settings <======
helpers_test.go:231: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-032958 -n default-k8s-diff-port-032958
helpers_test.go:253: <<< TestStartStop/group/default-k8s-diff-port/serial/Pause FAILED: start of post-mortem logs <<<
helpers_test.go:254: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: minikube logs <======
helpers_test.go:256: (dbg) Run: out/minikube-linux-amd64 -p default-k8s-diff-port-032958 logs -n 25
helpers_test.go:256: (dbg) Done: out/minikube-linux-amd64 -p default-k8s-diff-port-032958 logs -n 25: (1.589780324s)
helpers_test.go:261: TestStartStop/group/default-k8s-diff-port/serial/Pause logs:
-- stdout --
==> Audit <==
┌─────────┬──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬──────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼──────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p kindnet-503505 sudo journalctl -xeu kubelet --all --full --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /etc/kubernetes/kubelet.conf │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /var/lib/kubelet/config.yaml │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl status docker --all --full --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl cat docker --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /etc/docker/daemon.json │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo docker system info │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl status cri-docker --all --full --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl cat cri-docker --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /usr/lib/systemd/system/cri-docker.service │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cri-dockerd --version │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl status containerd --all --full --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl cat containerd --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /lib/systemd/system/containerd.service │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo cat /etc/containerd/config.toml │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo containerd config dump │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo systemctl status crio --all --full --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ │
│ ssh │ -p kindnet-503505 sudo systemctl cat crio --no-pager │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ ssh │ -p kindnet-503505 sudo crio config │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ delete │ -p kindnet-503505 │ kindnet-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ 20 Dec 25 02:13 UTC │
│ start │ -p false-503505 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=kvm2 --container-runtime=docker │ false-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:13 UTC │ │
│ unpause │ -p default-k8s-diff-port-032958 --alsologtostderr -v=1 │ default-k8s-diff-port-032958 │ minitest │ v1.37.0 │ 20 Dec 25 02:14 UTC │ 20 Dec 25 02:14 UTC │
│ ssh │ -p custom-flannel-503505 pgrep -a kubelet │ custom-flannel-503505 │ minitest │ v1.37.0 │ 20 Dec 25 02:14 UTC │ 20 Dec 25 02:14 UTC │
└─────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴──────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/12/20 02:13:53
Running on machine: minitest-vm-9d09530a
Binary: Built with gc go1.25.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1220 02:13:53.658426 38979 out.go:360] Setting OutFile to fd 1 ...
I1220 02:13:53.658597 38979 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1220 02:13:53.658612 38979 out.go:374] Setting ErrFile to fd 2...
I1220 02:13:53.658620 38979 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1220 02:13:53.658880 38979 root.go:338] Updating PATH: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/bin
I1220 02:13:53.659482 38979 out.go:368] Setting JSON to false
I1220 02:13:53.660578 38979 start.go:133] hostinfo: {"hostname":"minitest-vm-9d09530a.c.k8s-infra-e2e-boskos-103.internal","uptime":3547,"bootTime":1766193287,"procs":209,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"24.04","kernelVersion":"6.14.0-1021-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"324b1d65-3a78-4886-9ab4-95ed3c96a31c"}
I1220 02:13:53.660687 38979 start.go:143] virtualization: kvm guest
I1220 02:13:53.662866 38979 out.go:179] * [false-503505] minikube v1.37.0 on Ubuntu 24.04 (kvm/amd64)
I1220 02:13:53.664260 38979 notify.go:221] Checking for updates...
I1220 02:13:53.664290 38979 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1220 02:13:53.665824 38979 out.go:179] - KUBECONFIG=/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/kubeconfig
I1220 02:13:53.667283 38979 out.go:179] - MINIKUBE_HOME=/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube
I1220 02:13:53.668904 38979 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1220 02:13:53.670341 38979 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1220 02:13:53.672156 38979 config.go:182] Loaded profile config "calico-503505": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
I1220 02:13:53.672297 38979 config.go:182] Loaded profile config "custom-flannel-503505": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
I1220 02:13:53.672434 38979 config.go:182] Loaded profile config "default-k8s-diff-port-032958": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
I1220 02:13:53.672545 38979 config.go:182] Loaded profile config "guest-073858": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v0.0.0
I1220 02:13:53.672679 38979 driver.go:422] Setting default libvirt URI to qemu:///system
I1220 02:13:53.714352 38979 out.go:179] * Using the kvm2 driver based on user configuration
I1220 02:13:53.715582 38979 start.go:309] selected driver: kvm2
I1220 02:13:53.715609 38979 start.go:928] validating driver "kvm2" against <nil>
I1220 02:13:53.715626 38979 start.go:939] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1220 02:13:53.716847 38979 start_flags.go:329] no existing cluster config was found, will generate one from the flags
I1220 02:13:53.717254 38979 start_flags.go:995] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1220 02:13:53.717297 38979 cni.go:84] Creating CNI manager for "false"
I1220 02:13:53.717349 38979 start.go:353] cluster config:
{Name:false-503505 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765966054-22186@sha256:1c173489767e6632c410d2554f1a2272f032a423dd528157e201daadfe3c43f0 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.3 ClusterName:false-503505 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:false} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:15m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1220 02:13:53.717508 38979 iso.go:125] acquiring lock: {Name:mk8cff2fd2ec419d0f1f974993910ae0235f0b9c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1220 02:13:53.719137 38979 out.go:179] * Starting "false-503505" primary control-plane node in "false-503505" cluster
I1220 02:13:53.720475 38979 preload.go:188] Checking if preload exists for k8s version v1.34.3 and runtime docker
I1220 02:13:53.720519 38979 preload.go:203] Found local preload: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.3-docker-overlay2-amd64.tar.lz4
I1220 02:13:53.720529 38979 cache.go:65] Caching tarball of preloaded images
I1220 02:13:53.720653 38979 preload.go:251] Found /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.3-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1220 02:13:53.720670 38979 cache.go:68] Finished verifying existence of preloaded tar for v1.34.3 on docker
I1220 02:13:53.720801 38979 profile.go:143] Saving config to /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/false-503505/config.json ...
I1220 02:13:53.720830 38979 lock.go:35] WriteFile acquiring /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/false-503505/config.json: {Name:mkc8b6869a0bb6c3a942663395236fb8c2775a51 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1220 02:13:53.721027 38979 start.go:360] acquireMachinesLock for false-503505: {Name:mkeb3229b5d18611c16c8e938b31492b9b6546b6 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1220 02:13:53.721080 38979 start.go:364] duration metric: took 32.113µs to acquireMachinesLock for "false-503505"
I1220 02:13:53.721108 38979 start.go:93] Provisioning new machine with config: &{Name:false-503505 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22186/minikube-v1.37.0-1765965980-22186-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765966054-22186@sha256:1c173489767e6632c410d2554f1a2272f032a423dd528157e201daadfe3c43f0 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.3 ClusterName:false-503505 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:false} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:15m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.3 ContainerRuntime:docker ControlPlane:true Worker:true}
I1220 02:13:53.721191 38979 start.go:125] createHost starting for "" (driver="kvm2")
I1220 02:13:53.104657 37878 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1220 02:13:53.203649 37878 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1220 02:13:53.414002 37878 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1220 02:13:53.414235 37878 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [custom-flannel-503505 localhost] and IPs [192.168.72.110 127.0.0.1 ::1]
I1220 02:13:53.718885 37878 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1220 02:13:53.719606 37878 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [custom-flannel-503505 localhost] and IPs [192.168.72.110 127.0.0.1 ::1]
I1220 02:13:54.333369 37878 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1220 02:13:54.424119 37878 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1220 02:13:54.440070 37878 kubeadm.go:319] [certs] Generating "sa" key and public key
I1220 02:13:54.440221 37878 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1220 02:13:54.643883 37878 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1220 02:13:54.882013 37878 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1220 02:13:54.904688 37878 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1220 02:13:55.025586 37878 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1220 02:13:55.145485 37878 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1220 02:13:55.145626 37878 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1220 02:13:55.148326 37878 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
W1220 02:13:54.723698 37762 node_ready.go:57] node "calico-503505" has "Ready":"False" status (will retry)
W1220 02:13:57.088471 37762 node_ready.go:57] node "calico-503505" has "Ready":"False" status (will retry)
I1220 02:13:55.150289 37878 out.go:252] - Booting up control plane ...
I1220 02:13:55.150458 37878 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1220 02:13:55.151333 37878 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1220 02:13:55.152227 37878 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1220 02:13:55.175699 37878 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1220 02:13:55.175981 37878 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1220 02:13:55.186275 37878 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1220 02:13:55.186852 37878 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1220 02:13:55.186945 37878 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1220 02:13:55.443272 37878 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1220 02:13:55.443453 37878 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1220 02:13:57.443421 37878 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 2.001962214s
I1220 02:13:57.453249 37878 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1220 02:13:57.453392 37878 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.72.110:8443/livez
I1220 02:13:57.453521 37878 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1220 02:13:57.453636 37878 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1220 02:13:53.723129 38979 out.go:252] * Creating kvm2 VM (CPUs=2, Memory=3072MB, Disk=20000MB) ...
I1220 02:13:53.723383 38979 start.go:159] libmachine.API.Create for "false-503505" (driver="kvm2")
I1220 02:13:53.723423 38979 client.go:173] LocalClient.Create starting
I1220 02:13:53.723510 38979 main.go:144] libmachine: Reading certificate data from /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca.pem
I1220 02:13:53.723557 38979 main.go:144] libmachine: Decoding PEM data...
I1220 02:13:53.723581 38979 main.go:144] libmachine: Parsing certificate...
I1220 02:13:53.723676 38979 main.go:144] libmachine: Reading certificate data from /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/cert.pem
I1220 02:13:53.723706 38979 main.go:144] libmachine: Decoding PEM data...
I1220 02:13:53.723725 38979 main.go:144] libmachine: Parsing certificate...
I1220 02:13:53.724182 38979 main.go:144] libmachine: creating domain...
I1220 02:13:53.724217 38979 main.go:144] libmachine: creating network...
I1220 02:13:53.725920 38979 main.go:144] libmachine: found existing default network
I1220 02:13:53.726255 38979 main.go:144] libmachine: <network connections='4'>
<name>default</name>
<uuid>650ca552-1913-49ac-a1fd-736d0c584a06</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr0' stp='on' delay='0'/>
<mac address='52:54:00:de:58:ff'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.122.2' end='192.168.122.254'/>
</dhcp>
</ip>
</network>
I1220 02:13:53.727630 38979 network.go:211] skipping subnet 192.168.39.0/24 that is taken: &{IP:192.168.39.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.39.0/24 Gateway:192.168.39.1 ClientMin:192.168.39.2 ClientMax:192.168.39.254 Broadcast:192.168.39.255 IsPrivate:true Interface:{IfaceName:virbr1 IfaceIPv4:192.168.39.1 IfaceMTU:1500 IfaceMAC:52:54:00:e8:02:c4} reservation:<nil>}
I1220 02:13:53.728421 38979 network.go:211] skipping subnet 192.168.50.0/24 that is taken: &{IP:192.168.50.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.50.0/24 Gateway:192.168.50.1 ClientMin:192.168.50.2 ClientMax:192.168.50.254 Broadcast:192.168.50.255 IsPrivate:true Interface:{IfaceName:virbr2 IfaceIPv4:192.168.50.1 IfaceMTU:1500 IfaceMAC:52:54:00:8b:7d:ff} reservation:<nil>}
I1220 02:13:53.729869 38979 network.go:206] using free private subnet 192.168.61.0/24: &{IP:192.168.61.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.61.0/24 Gateway:192.168.61.1 ClientMin:192.168.61.2 ClientMax:192.168.61.254 Broadcast:192.168.61.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001aac760}
I1220 02:13:53.729965 38979 main.go:144] libmachine: defining private network:
<network>
<name>mk-false-503505</name>
<dns enable='no'/>
<ip address='192.168.61.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.61.2' end='192.168.61.253'/>
</dhcp>
</ip>
</network>
I1220 02:13:53.736168 38979 main.go:144] libmachine: creating private network mk-false-503505 192.168.61.0/24...
I1220 02:13:53.810612 38979 main.go:144] libmachine: private network mk-false-503505 192.168.61.0/24 created
I1220 02:13:53.810976 38979 main.go:144] libmachine: <network>
<name>mk-false-503505</name>
<uuid>145d091e-eda6-4cfe-8946-ea394cfc6f9d</uuid>
<bridge name='virbr3' stp='on' delay='0'/>
<mac address='52:54:00:b5:b9:98'/>
<dns enable='no'/>
<ip address='192.168.61.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.61.2' end='192.168.61.253'/>
</dhcp>
</ip>
</network>
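An equivalent libvirt network could be created by hand from the same XML. A minimal sketch, assuming the <network> definition above is saved to a local file named mk-false-503505.xml (hypothetical path) and virsh talks to the same libvirt daemon:
virsh net-define mk-false-503505.xml   # register the network definition
virsh net-start mk-false-503505        # create the bridge and start DHCP on 192.168.61.0/24
virsh net-dumpxml mk-false-503505      # print the live XML, as logged above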
I1220 02:13:53.811017 38979 main.go:144] libmachine: setting up store path in /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505 ...
I1220 02:13:53.811066 38979 main.go:144] libmachine: building disk image from file:///home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/iso/amd64/minikube-v1.37.0-1765965980-22186-amd64.iso
I1220 02:13:53.811082 38979 common.go:152] Making disk image using store path: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube
I1220 02:13:53.811185 38979 main.go:144] libmachine: Downloading /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/boot2docker.iso from file:///home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/iso/amd64/minikube-v1.37.0-1765965980-22186-amd64.iso...
I1220 02:13:54.101881 38979 common.go:159] Creating ssh key: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/id_rsa...
I1220 02:13:54.171818 38979 common.go:165] Creating raw disk image: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/false-503505.rawdisk...
I1220 02:13:54.171860 38979 main.go:144] libmachine: Writing magic tar header
I1220 02:13:54.171878 38979 main.go:144] libmachine: Writing SSH key tar header
I1220 02:13:54.171952 38979 common.go:179] Fixing permissions on /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505 ...
I1220 02:13:54.172017 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505
I1220 02:13:54.172042 38979 main.go:144] libmachine: setting executable bit set on /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505 (perms=drwx------)
I1220 02:13:54.172055 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines
I1220 02:13:54.172068 38979 main.go:144] libmachine: setting executable bit set on /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines (perms=drwxr-xr-x)
I1220 02:13:54.172080 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube
I1220 02:13:54.172089 38979 main.go:144] libmachine: setting executable bit set on /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube (perms=drwxr-xr-x)
I1220 02:13:54.172097 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160
I1220 02:13:54.172106 38979 main.go:144] libmachine: setting executable bit set on /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160 (perms=drwxrwxr-x)
I1220 02:13:54.172116 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest/minikube-integration
I1220 02:13:54.172127 38979 main.go:144] libmachine: setting executable bit set on /home/minitest/minikube-integration (perms=drwxrwxr-x)
I1220 02:13:54.172134 38979 main.go:144] libmachine: checking permissions on dir: /home/minitest
I1220 02:13:54.172143 38979 main.go:144] libmachine: setting executable bit set on /home/minitest (perms=drwxr-x--x)
I1220 02:13:54.172153 38979 main.go:144] libmachine: checking permissions on dir: /home
I1220 02:13:54.172162 38979 main.go:144] libmachine: skipping /home - not owner
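The machine bootstrap above boils down to staging an SSH key and a raw disk image under the machine directory with owner-only permissions. A rough sketch of the moral equivalent by hand (libmachine itself writes the key and a magic tar header directly into the raw file rather than using these tools), assuming ssh-keygen and qemu-img are installed:
ssh-keygen -t rsa -N '' -f id_rsa                    # key pair later embedded via the SSH key tar header
qemu-img create -f raw false-503505.rawdisk 20000M   # 20000MB disk, matching the VM spec above
chmod 700 .                                          # drwx------ on the machine dir, as logged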
I1220 02:13:54.172166 38979 main.go:144] libmachine: defining domain...
I1220 02:13:54.173523 38979 main.go:144] libmachine: defining domain using XML:
<domain type='kvm'>
<name>false-503505</name>
<memory unit='MiB'>3072</memory>
<vcpu>2</vcpu>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough'>
</cpu>
<os>
<type>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<devices>
<disk type='file' device='cdrom'>
<source file='/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' cache='default' io='threads' />
<source file='/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/false-503505.rawdisk'/>
<target dev='hda' bus='virtio'/>
</disk>
<interface type='network'>
<source network='mk-false-503505'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<source network='default'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
</rng>
</devices>
</domain>
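The domain defined from that XML can also be driven manually with virsh. A sketch, assuming the XML above is saved to false-503505.xml (hypothetical file name):
virsh define false-503505.xml   # define the domain without starting it
virsh start false-503505        # boot the VM
virsh dumpxml false-503505      # show the expanded XML libvirt fills in (compare the "starting domain XML" dump below)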
I1220 02:13:54.178932 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:b7:52:73 in network default
I1220 02:13:54.179675 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:54.179696 38979 main.go:144] libmachine: starting domain...
I1220 02:13:54.179701 38979 main.go:144] libmachine: ensuring networks are active...
I1220 02:13:54.180774 38979 main.go:144] libmachine: Ensuring network default is active
I1220 02:13:54.181409 38979 main.go:144] libmachine: Ensuring network mk-false-503505 is active
I1220 02:13:54.182238 38979 main.go:144] libmachine: getting domain XML...
I1220 02:13:54.183538 38979 main.go:144] libmachine: starting domain XML:
<domain type='kvm'>
<name>false-503505</name>
<uuid>624dd300-6a99-4c02-9eff-8eb33e6519e9</uuid>
<memory unit='KiB'>3145728</memory>
<currentMemory unit='KiB'>3145728</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-noble'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='2'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' io='threads'/>
<source file='/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/false-503505.rawdisk'/>
<target dev='hda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</controller>
<interface type='network'>
<mac address='52:54:00:4e:1e:41'/>
<source network='mk-false-503505'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:b7:52:73'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<audio id='1' type='none'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</rng>
</devices>
</domain>
I1220 02:13:55.365223 38979 main.go:144] libmachine: waiting for domain to start...
I1220 02:13:55.367393 38979 main.go:144] libmachine: domain is now running
I1220 02:13:55.367419 38979 main.go:144] libmachine: waiting for IP...
I1220 02:13:55.368500 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:55.369502 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:55.369522 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:55.369923 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:55.369977 38979 retry.go:31] will retry after 247.996373ms: waiting for domain to come up
I1220 02:13:55.619698 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:55.620501 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:55.620524 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:55.620981 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:55.621018 38979 retry.go:31] will retry after 253.163992ms: waiting for domain to come up
I1220 02:13:55.875623 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:55.876522 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:55.876543 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:55.876997 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:55.877034 38979 retry.go:31] will retry after 322.078046ms: waiting for domain to come up
I1220 02:13:56.200749 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:56.201573 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:56.201590 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:56.201993 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:56.202032 38979 retry.go:31] will retry after 398.279098ms: waiting for domain to come up
I1220 02:13:56.601723 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:56.602519 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:56.602554 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:56.603065 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:56.603103 38979 retry.go:31] will retry after 668.508453ms: waiting for domain to come up
I1220 02:13:57.272883 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:57.273735 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:57.273763 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:57.274179 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:57.274223 38979 retry.go:31] will retry after 936.48012ms: waiting for domain to come up
I1220 02:13:58.212951 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:58.213934 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:58.213955 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:58.214490 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:58.214540 38979 retry.go:31] will retry after 1.101549544s: waiting for domain to come up
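The IP the retry loop above is waiting for comes from a DHCP lease on the mk-false-503505 network, with the ARP cache as a fallback (the source=arp lines). A sketch of checking the same two sources by hand, assuming virsh and iproute2 on the host:
virsh net-dhcp-leases mk-false-503505          # leases handed out by the network's dnsmasq
ip neigh show | grep -i '52:54:00:4e:1e:41'    # ARP/neighbour entry for the domain's MAC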
W1220 02:13:59.093909 37762 node_ready.go:57] node "calico-503505" has "Ready":"False" status (will retry)
I1220 02:14:00.089963 37762 node_ready.go:49] node "calico-503505" is "Ready"
I1220 02:14:00.090003 37762 node_ready.go:38] duration metric: took 9.504754397s for node "calico-503505" to be "Ready" ...
I1220 02:14:00.090027 37762 api_server.go:52] waiting for apiserver process to appear ...
I1220 02:14:00.090096 37762 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1220 02:14:00.121908 37762 api_server.go:72] duration metric: took 11.479258368s to wait for apiserver process to appear ...
I1220 02:14:00.121945 37762 api_server.go:88] waiting for apiserver healthz status ...
I1220 02:14:00.121968 37762 api_server.go:253] Checking apiserver healthz at https://192.168.39.226:8443/healthz ...
I1220 02:14:00.133024 37762 api_server.go:279] https://192.168.39.226:8443/healthz returned 200:
ok
I1220 02:14:00.134499 37762 api_server.go:141] control plane version: v1.34.3
I1220 02:14:00.134533 37762 api_server.go:131] duration metric: took 12.580039ms to wait for apiserver health ...
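The same health and version checks can be reproduced from the outside with kubectl. A sketch, assuming kubectl is pointed at the calico-503505 cluster's kubeconfig:
kubectl get --raw='/healthz'      # should print "ok", matching the 200 above
kubectl version                   # reports the v1.34.3 control plane version
kubectl get nodes calico-503505   # STATUS should be Ready, as logged at 02:14:00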
I1220 02:14:00.134544 37762 system_pods.go:43] waiting for kube-system pods to appear ...
I1220 02:14:00.143085 37762 system_pods.go:59] 9 kube-system pods found
I1220 02:14:00.143143 37762 system_pods.go:61] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:00.143160 37762 system_pods.go:61] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:00.143171 37762 system_pods.go:61] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:00.143177 37762 system_pods.go:61] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:00.143183 37762 system_pods.go:61] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:00.143188 37762 system_pods.go:61] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:00.143194 37762 system_pods.go:61] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:00.143219 37762 system_pods.go:61] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:00.143233 37762 system_pods.go:61] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:00.143243 37762 system_pods.go:74] duration metric: took 8.690731ms to wait for pod list to return data ...
I1220 02:14:00.143254 37762 default_sa.go:34] waiting for default service account to be created ...
I1220 02:14:00.147300 37762 default_sa.go:45] found service account: "default"
I1220 02:14:00.147335 37762 default_sa.go:55] duration metric: took 4.072144ms for default service account to be created ...
I1220 02:14:00.147349 37762 system_pods.go:116] waiting for k8s-apps to be running ...
I1220 02:14:00.153827 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:00.153869 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:00.153882 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:00.153892 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:00.153900 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:00.153907 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:00.153911 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:00.153917 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:00.153922 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:00.153930 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:00.153953 37762 retry.go:31] will retry after 191.011989ms: missing components: kube-dns
I1220 02:14:00.353588 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:00.353638 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:00.353652 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:00.353665 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:00.353673 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:00.353681 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:00.353688 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:00.353696 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:00.353702 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:00.353710 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:00.353731 37762 retry.go:31] will retry after 332.593015ms: missing components: kube-dns
I1220 02:14:00.697960 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:00.698016 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:00.698032 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:00.698045 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:00.698051 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:00.698057 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:00.698062 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:00.698068 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:00.698073 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:00.698080 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:00.698098 37762 retry.go:31] will retry after 441.450882ms: missing components: kube-dns
I1220 02:14:01.147620 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:01.147663 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:01.147675 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:01.147685 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:01.147690 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:01.147697 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:01.147702 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:01.147707 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:01.147711 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:01.147718 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:01.147737 37762 retry.go:31] will retry after 398.996064ms: missing components: kube-dns
I1220 02:14:01.555710 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:01.555752 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:01.555764 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:01.555774 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:01.555779 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:01.555786 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:01.555791 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:01.555797 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:01.555802 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:01.555813 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:01.555831 37762 retry.go:31] will retry after 742.519055ms: missing components: kube-dns
I1220 02:14:02.306002 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:02.306049 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:02.306068 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:02.306080 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:02.306088 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:02.306097 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:02.306102 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:02.306109 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:02.306114 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:02.306119 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:02.306141 37762 retry.go:31] will retry after 687.588334ms: missing components: kube-dns
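The loop above keeps retrying because coredns (reported as the kube-dns component) is still Pending. The same condition can be checked, or waited on directly, with kubectl; a sketch, assuming kubectl access to the calico-503505 cluster:
kubectl -n kube-system get pods -l k8s-app=kube-dns   # the pods the kube-dns check looks at
kubectl -n kube-system wait --for=condition=Ready pod -l k8s-app=kube-dns --timeout=4m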
I1220 02:14:01.475480 37878 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 4.023883563s
I1220 02:13:59.318088 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:13:59.319169 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:13:59.319195 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:13:59.319707 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:13:59.319759 38979 retry.go:31] will retry after 1.133836082s: waiting for domain to come up
I1220 02:14:00.455752 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:00.457000 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:14:00.457032 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:14:00.457642 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:14:00.457696 38979 retry.go:31] will retry after 1.689205474s: waiting for domain to come up
I1220 02:14:02.149657 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:02.150579 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:14:02.150669 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:14:02.151167 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:14:02.151218 38979 retry.go:31] will retry after 1.402452731s: waiting for domain to come up
I1220 02:14:03.555309 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:03.556319 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:14:03.556389 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:14:03.556908 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:14:03.556948 38979 retry.go:31] will retry after 2.79303956s: waiting for domain to come up
I1220 02:14:03.304845 37878 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 5.854389668s
I1220 02:14:04.452000 37878 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 7.001670897s
I1220 02:14:04.482681 37878 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1220 02:14:04.509128 37878 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1220 02:14:04.533960 37878 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1220 02:14:04.534255 37878 kubeadm.go:319] [mark-control-plane] Marking the node custom-flannel-503505 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1220 02:14:04.549617 37878 kubeadm.go:319] [bootstrap-token] Using token: 5feew1.aaci0na7tzxpkq74
I1220 02:14:04.551043 37878 out.go:252] - Configuring RBAC rules ...
I1220 02:14:04.551218 37878 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1220 02:14:04.561847 37878 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1220 02:14:04.591000 37878 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1220 02:14:04.597908 37878 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1220 02:14:04.606680 37878 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1220 02:14:04.614933 37878 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1220 02:14:04.862442 37878 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1220 02:14:05.356740 37878 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1220 02:14:05.862025 37878 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1220 02:14:05.865061 37878 kubeadm.go:319]
I1220 02:14:05.865156 37878 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1220 02:14:05.865168 37878 kubeadm.go:319]
I1220 02:14:05.865282 37878 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1220 02:14:05.865294 37878 kubeadm.go:319]
I1220 02:14:05.865359 37878 kubeadm.go:319] mkdir -p $HOME/.kube
I1220 02:14:05.865464 37878 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1220 02:14:05.865569 37878 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1220 02:14:05.865593 37878 kubeadm.go:319]
I1220 02:14:05.865675 37878 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1220 02:14:05.865685 37878 kubeadm.go:319]
I1220 02:14:05.865781 37878 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1220 02:14:05.865795 37878 kubeadm.go:319]
I1220 02:14:05.865876 37878 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1220 02:14:05.865983 37878 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1220 02:14:05.866079 37878 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1220 02:14:05.866085 37878 kubeadm.go:319]
I1220 02:14:05.866221 37878 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1220 02:14:05.866332 37878 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1220 02:14:05.866337 37878 kubeadm.go:319]
I1220 02:14:05.866459 37878 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 5feew1.aaci0na7tzxpkq74 \
I1220 02:14:05.866573 37878 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:34b132c11c5a062e0480b441f2caac7fcba372b596da4b2c80fd8c00c74704a7 \
I1220 02:14:05.866595 37878 kubeadm.go:319] --control-plane
I1220 02:14:05.866599 37878 kubeadm.go:319]
I1220 02:14:05.866684 37878 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1220 02:14:05.866688 37878 kubeadm.go:319]
I1220 02:14:05.866779 37878 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token 5feew1.aaci0na7tzxpkq74 \
I1220 02:14:05.866902 37878 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:34b132c11c5a062e0480b441f2caac7fcba372b596da4b2c80fd8c00c74704a7
I1220 02:14:05.869888 37878 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
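The --discovery-token-ca-cert-hash printed in the join commands above can be recomputed on the control plane before handing it to a joining node. This is the standard openssl pipeline from the kubeadm documentation, assuming access to /etc/kubernetes/pki/ca.crt on the VM and the default RSA CA key:
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'
# should print 34b132c11c5a062e0480b441f2caac7fcba372b596da4b2c80fd8c00c74704a7
The [WARNING Service-Kubelet] above is addressed by the command it suggests: sudo systemctl enable kubelet.service.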
I1220 02:14:05.869959 37878 cni.go:84] Creating CNI manager for "testdata/kube-flannel.yaml"
I1220 02:14:05.871868 37878 out.go:179] * Configuring testdata/kube-flannel.yaml (Container Networking Interface) ...
I1220 02:14:03.004292 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:03.004339 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:03.004352 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:03.004361 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:03.004367 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:03.004374 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:03.004379 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:03.004384 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:03.004389 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:03.004394 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:03.004412 37762 retry.go:31] will retry after 732.081748ms: missing components: kube-dns
I1220 02:14:03.744119 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:03.744161 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:03.744175 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:03.744185 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:03.744191 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:03.744214 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:03.744221 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:03.744227 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:03.744232 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:03.744241 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:03.744273 37762 retry.go:31] will retry after 1.276813322s: missing components: kube-dns
I1220 02:14:05.030079 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:05.030129 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:05.030146 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:05.030161 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:05.030168 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:05.030187 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:05.030194 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:05.030221 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:05.030229 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:05.030235 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:05.030257 37762 retry.go:31] will retry after 1.238453929s: missing components: kube-dns
I1220 02:14:06.275974 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:06.276021 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:06.276033 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:06.276049 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:06.276055 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:06.276061 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:06.276066 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:06.276077 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:06.276083 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:06.276087 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:06.276106 37762 retry.go:31] will retry after 1.908248969s: missing components: kube-dns
I1220 02:14:05.873406 37878 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.3/kubectl ...
I1220 02:14:05.873469 37878 ssh_runner.go:195] Run: stat -c "%s %y" /var/tmp/minikube/cni.yaml
I1220 02:14:05.881393 37878 ssh_runner.go:352] existence check for /var/tmp/minikube/cni.yaml: stat -c "%s %y" /var/tmp/minikube/cni.yaml: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/tmp/minikube/cni.yaml': No such file or directory
I1220 02:14:05.881431 37878 ssh_runner.go:362] scp testdata/kube-flannel.yaml --> /var/tmp/minikube/cni.yaml (4578 bytes)
I1220 02:14:05.936780 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1220 02:14:06.396862 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:06.396880 37878 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1220 02:14:06.396862 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes custom-flannel-503505 minikube.k8s.io/updated_at=2025_12_20T02_14_06_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=7cd9f41b7421760cf1f1eaa8725bdb975037b06d minikube.k8s.io/name=custom-flannel-503505 minikube.k8s.io/primary=true
I1220 02:14:06.630781 37878 ops.go:34] apiserver oom_adj: -16
I1220 02:14:06.630941 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:07.131072 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:07.631526 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
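The CNI and RBAC steps logged above reduce to a handful of kubectl commands run over SSH. A by-hand sketch of the equivalents, assuming kube-flannel.yaml is local and kubectl targets the custom-flannel-503505 cluster:
kubectl apply -f kube-flannel.yaml
kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default
kubectl label --overwrite nodes custom-flannel-503505 minikube.k8s.io/primary=true
kubectl -n default get serviceaccount default   # the repeated "get sa default" lines simply poll until this succeeds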
I1220 02:14:06.351650 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:06.352735 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:14:06.352774 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:14:06.353319 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:14:06.353358 38979 retry.go:31] will retry after 3.225841356s: waiting for domain to come up
I1220 02:14:08.131099 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:08.631429 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:09.131400 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:09.631470 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:10.131821 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:10.631264 37878 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1220 02:14:10.765536 37878 kubeadm.go:1114] duration metric: took 4.368721457s to wait for elevateKubeSystemPrivileges
I1220 02:14:10.765599 37878 kubeadm.go:403] duration metric: took 18.502801612s to StartCluster
I1220 02:14:10.765625 37878 settings.go:142] acquiring lock: {Name:mk57472848b32b0320e862b3ad8a64076ed3d76e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1220 02:14:10.765731 37878 settings.go:150] Updating kubeconfig: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/kubeconfig
I1220 02:14:10.767410 37878 lock.go:35] WriteFile acquiring /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/kubeconfig: {Name:mk7e6532318eb55e3c1811a528040bd41c46d8c7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1220 02:14:10.767716 37878 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1220 02:14:10.767786 37878 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1220 02:14:10.767867 37878 addons.go:70] Setting storage-provisioner=true in profile "custom-flannel-503505"
I1220 02:14:10.767885 37878 addons.go:239] Setting addon storage-provisioner=true in "custom-flannel-503505"
I1220 02:14:10.767747 37878 start.go:236] Will wait 15m0s for node &{Name: IP:192.168.72.110 Port:8443 KubernetesVersion:v1.34.3 ContainerRuntime:docker ControlPlane:true Worker:true}
I1220 02:14:10.767912 37878 host.go:66] Checking if "custom-flannel-503505" exists ...
I1220 02:14:10.767936 37878 config.go:182] Loaded profile config "custom-flannel-503505": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
I1220 02:14:10.767992 37878 addons.go:70] Setting default-storageclass=true in profile "custom-flannel-503505"
I1220 02:14:10.768006 37878 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "custom-flannel-503505"
I1220 02:14:10.769347 37878 out.go:179] * Verifying Kubernetes components...
I1220 02:14:10.770891 37878 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:10.772643 37878 addons.go:239] Setting addon default-storageclass=true in "custom-flannel-503505"
I1220 02:14:10.772686 37878 host.go:66] Checking if "custom-flannel-503505" exists ...
I1220 02:14:10.772827 37878 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1220 02:14:10.774271 37878 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1220 02:14:10.774291 37878 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1220 02:14:10.775118 37878 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1220 02:14:10.775173 37878 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1220 02:14:10.778715 37878 main.go:144] libmachine: domain custom-flannel-503505 has defined MAC address 52:54:00:31:8f:50 in network mk-custom-flannel-503505
I1220 02:14:10.779148 37878 main.go:144] libmachine: domain custom-flannel-503505 has defined MAC address 52:54:00:31:8f:50 in network mk-custom-flannel-503505
I1220 02:14:10.779240 37878 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:31:8f:50", ip: ""} in network mk-custom-flannel-503505: {Iface:virbr4 ExpiryTime:2025-12-20 03:13:37 +0000 UTC Type:0 Mac:52:54:00:31:8f:50 Iaid: IPaddr:192.168.72.110 Prefix:24 Hostname:custom-flannel-503505 Clientid:01:52:54:00:31:8f:50}
I1220 02:14:10.779272 37878 main.go:144] libmachine: domain custom-flannel-503505 has defined IP address 192.168.72.110 and MAC address 52:54:00:31:8f:50 in network mk-custom-flannel-503505
I1220 02:14:10.779776 37878 sshutil.go:53] new ssh client: &{IP:192.168.72.110 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/custom-flannel-503505/id_rsa Username:docker}
I1220 02:14:10.780325 37878 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:31:8f:50", ip: ""} in network mk-custom-flannel-503505: {Iface:virbr4 ExpiryTime:2025-12-20 03:13:37 +0000 UTC Type:0 Mac:52:54:00:31:8f:50 Iaid: IPaddr:192.168.72.110 Prefix:24 Hostname:custom-flannel-503505 Clientid:01:52:54:00:31:8f:50}
I1220 02:14:10.780367 37878 main.go:144] libmachine: domain custom-flannel-503505 has defined IP address 192.168.72.110 and MAC address 52:54:00:31:8f:50 in network mk-custom-flannel-503505
I1220 02:14:10.780605 37878 sshutil.go:53] new ssh client: &{IP:192.168.72.110 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/custom-flannel-503505/id_rsa Username:docker}
I1220 02:14:11.077940 37878 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.72.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1220 02:14:11.193874 37878 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1220 02:14:11.505786 37878 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1220 02:14:11.514993 37878 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1220 02:14:11.665088 37878 start.go:977] {"host.minikube.internal": 192.168.72.1} host record injected into CoreDNS's ConfigMap
I1220 02:14:11.666520 37878 node_ready.go:35] waiting up to 15m0s for node "custom-flannel-503505" to be "Ready" ...
I1220 02:14:12.188508 37878 kapi.go:214] "coredns" deployment in "kube-system" namespace and "custom-flannel-503505" context rescaled to 1 replicas
I1220 02:14:12.198043 37878 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
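The CoreDNS adjustments logged above (injecting a host.minikube.internal hosts block and rescaling the deployment to one replica) can be inspected or reproduced with kubectl. A sketch, assuming kubectl targets the custom-flannel-503505 cluster:
kubectl -n kube-system get configmap coredns -o yaml          # Corefile now contains the injected hosts block
kubectl -n kube-system scale deployment coredns --replicas=1  # matches the "rescaled to 1 replicas" line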
I1220 02:14:08.191550 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:08.191589 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:08.191605 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:08.191621 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:08.191627 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:08.191633 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:08.191639 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:08.191645 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:08.191652 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:08.191661 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:08.191680 37762 retry.go:31] will retry after 2.235844761s: missing components: kube-dns
I1220 02:14:10.441962 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:10.442003 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:10.442017 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Initialized:ContainersNotInitialized (containers with incomplete status: [ebpf-bootstrap]) / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:10.442028 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:10.442035 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:10.442041 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:10.442048 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:10.442053 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:10.442059 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:10.442063 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:10.442080 37762 retry.go:31] will retry after 3.072193082s: missing components: kube-dns
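[Note] The system_pods/retry lines above follow a poll-with-backoff pattern: list the kube-system pods, report which required components (here kube-dns) are still missing, and retry after a few seconds. A minimal stand-alone sketch of that pattern; checkMissing, the timeout handling, and the jitter range are assumptions chosen to roughly match the 2-5s retries seen in the log.

package example

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// waitForComponents polls checkMissing until it reports no missing components
// or the deadline passes, sleeping a jittered interval between attempts.
func waitForComponents(checkMissing func() []string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		missing := checkMissing()
		if len(missing) == 0 {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out; still missing: " + fmt.Sprint(missing))
		}
		wait := 2*time.Second + time.Duration(rand.Int63n(int64(3*time.Second)))
		fmt.Printf("will retry after %v: missing components: %v\n", wait, missing)
		time.Sleep(wait)
	}
}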
I1220 02:14:12.199503 37878 addons.go:530] duration metric: took 1.431726471s for enable addons: enabled=[storage-provisioner default-storageclass]
I1220 02:14:09.580950 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:09.581833 38979 main.go:144] libmachine: no network interface addresses found for domain false-503505 (source=lease)
I1220 02:14:09.581857 38979 main.go:144] libmachine: trying to list again with source=arp
I1220 02:14:09.582327 38979 main.go:144] libmachine: unable to find current IP address of domain false-503505 in network mk-false-503505 (interfaces detected: [])
I1220 02:14:09.582367 38979 retry.go:31] will retry after 3.32332613s: waiting for domain to come up
I1220 02:14:12.910036 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:12.911080 38979 main.go:144] libmachine: domain false-503505 has current primary IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:12.911099 38979 main.go:144] libmachine: found domain IP: 192.168.61.177
I1220 02:14:12.911107 38979 main.go:144] libmachine: reserving static IP address...
I1220 02:14:12.911656 38979 main.go:144] libmachine: unable to find host DHCP lease matching {name: "false-503505", mac: "52:54:00:4e:1e:41", ip: "192.168.61.177"} in network mk-false-503505
I1220 02:14:13.162890 38979 main.go:144] libmachine: reserved static IP address 192.168.61.177 for domain false-503505
I1220 02:14:13.162914 38979 main.go:144] libmachine: waiting for SSH...
I1220 02:14:13.162921 38979 main.go:144] libmachine: Getting to WaitForSSH function...
I1220 02:14:13.166240 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.166798 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:minikube Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.166839 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.167111 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:13.167442 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:13.167462 38979 main.go:144] libmachine: About to run SSH command:
exit 0
I1220 02:14:13.287553 38979 main.go:144] libmachine: SSH cmd err, output: <nil>:
I1220 02:14:13.288033 38979 main.go:144] libmachine: domain creation complete
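[Note] The "waiting for SSH" step above keeps probing the new domain until an SSH command ("exit 0") succeeds. A reduced sketch that only checks TCP reachability of port 22; the real flow additionally authenticates with the machine's id_rsa key and runs the command. Function name and intervals are illustrative.

package example

import (
	"net"
	"time"
)

// waitForSSH polls port 22 on the guest until it accepts connections or the
// timeout expires.
func waitForSSH(ip string, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", net.JoinHostPort(ip, "22"), 3*time.Second)
		if err == nil {
			conn.Close()
			return true
		}
		time.Sleep(2 * time.Second)
	}
	return false
}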
I1220 02:14:13.289768 38979 machine.go:94] provisionDockerMachine start ...
I1220 02:14:13.292967 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.293534 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.293566 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.293831 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:13.294091 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:13.294106 38979 main.go:144] libmachine: About to run SSH command:
hostname
I1220 02:14:13.408900 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: minikube
I1220 02:14:13.408931 38979 buildroot.go:166] provisioning hostname "false-503505"
I1220 02:14:13.412183 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.412723 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.412747 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.412990 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:13.413194 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:13.413235 38979 main.go:144] libmachine: About to run SSH command:
sudo hostname false-503505 && echo "false-503505" | sudo tee /etc/hostname
I1220 02:14:13.545519 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: false-503505
I1220 02:14:13.548500 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.548973 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.549006 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.549225 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:13.549497 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:13.549521 38979 main.go:144] libmachine: About to run SSH command:
if ! grep -xq '.*\sfalse-503505' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 false-503505/g' /etc/hosts;
else
echo '127.0.1.1 false-503505' | sudo tee -a /etc/hosts;
fi
fi
I1220 02:14:13.522551 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:13.522594 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:13.522608 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Pending / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:13.522618 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:13.522624 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:13.522630 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:13.522633 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:13.522638 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:13.522643 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:13.522648 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:13.522671 37762 retry.go:31] will retry after 2.893940025s: missing components: kube-dns
I1220 02:14:16.427761 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:16.427804 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:16.427822 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:16.427834 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:16.427841 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:16.427847 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:16.427857 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:16.427863 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:16.427876 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:16.427881 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:16.427898 37762 retry.go:31] will retry after 5.028189083s: missing components: kube-dns
W1220 02:14:13.671217 37878 node_ready.go:57] node "custom-flannel-503505" has "Ready":"False" status (will retry)
W1220 02:14:16.172759 37878 node_ready.go:57] node "custom-flannel-503505" has "Ready":"False" status (will retry)
I1220 02:14:13.683279 38979 main.go:144] libmachine: SSH cmd err, output: <nil>:
I1220 02:14:13.683320 38979 buildroot.go:172] set auth options {CertDir:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube CaCertPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca.pem CaPrivateKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/server.pem ServerKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/server-key.pem ClientKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube}
I1220 02:14:13.683376 38979 buildroot.go:174] setting up certificates
I1220 02:14:13.683393 38979 provision.go:84] configureAuth start
I1220 02:14:13.687478 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.688091 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.688126 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.691975 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.692656 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.692715 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.692969 38979 provision.go:143] copyHostCerts
I1220 02:14:13.693049 38979 exec_runner.go:144] found /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/ca.pem, removing ...
I1220 02:14:13.693064 38979 exec_runner.go:203] rm: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/ca.pem
I1220 02:14:13.693154 38979 exec_runner.go:151] cp: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca.pem --> /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/ca.pem (1082 bytes)
I1220 02:14:13.693360 38979 exec_runner.go:144] found /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cert.pem, removing ...
I1220 02:14:13.693377 38979 exec_runner.go:203] rm: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cert.pem
I1220 02:14:13.693441 38979 exec_runner.go:151] cp: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/cert.pem --> /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cert.pem (1127 bytes)
I1220 02:14:13.693548 38979 exec_runner.go:144] found /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/key.pem, removing ...
I1220 02:14:13.693560 38979 exec_runner.go:203] rm: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/key.pem
I1220 02:14:13.693612 38979 exec_runner.go:151] cp: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/key.pem --> /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/key.pem (1675 bytes)
I1220 02:14:13.693705 38979 provision.go:117] generating server cert: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/server.pem ca-key=/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca.pem private-key=/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca-key.pem org=minitest.false-503505 san=[127.0.0.1 192.168.61.177 false-503505 localhost minikube]
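[Note] The "generating server cert" line above issues a server certificate signed by the profile CA, carrying the listed SANs (loopback, guest IP, hostname, localhost, minikube). A rough standard-library sketch of that kind of issuance, assuming a PKCS#1 RSA CA key; the function name, validity period, and key size are placeholders rather than minikube's actual parameters.

package example

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"errors"
	"math/big"
	"net"
	"time"
)

// issueServerCert signs a new server certificate with the given CA, embedding
// the requested IP and DNS SANs, and returns PEM-encoded cert and key.
func issueServerCert(caCertPEM, caKeyPEM []byte, org string, ips []net.IP, dns []string) (certPEM, keyPEM []byte, err error) {
	caBlock, _ := pem.Decode(caCertPEM)
	keyBlock, _ := pem.Decode(caKeyPEM)
	if caBlock == nil || keyBlock == nil {
		return nil, nil, errors.New("invalid CA PEM input")
	}
	caCert, err := x509.ParseCertificate(caBlock.Bytes)
	if err != nil {
		return nil, nil, err
	}
	caKey, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes) // assumes PKCS#1 RSA key
	if err != nil {
		return nil, nil, err
	}
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(time.Now().UnixNano()),
		Subject:      pkix.Name{Organization: []string{org}},
		NotBefore:    time.Now().Add(-time.Hour),
		NotAfter:     time.Now().Add(3 * 365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses:  ips,
		DNSNames:     dns,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, caCert, &priv.PublicKey, caKey)
	if err != nil {
		return nil, nil, err
	}
	certPEM = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM = pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
	return certPEM, keyPEM, nil
}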
I1220 02:14:13.709086 38979 provision.go:177] copyRemoteCerts
I1220 02:14:13.709144 38979 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1220 02:14:13.713124 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.713703 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.713755 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.713967 38979 sshutil.go:53] new ssh client: &{IP:192.168.61.177 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/id_rsa Username:docker}
I1220 02:14:13.809584 38979 ssh_runner.go:362] scp /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1220 02:14:13.845246 38979 ssh_runner.go:362] scp /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I1220 02:14:13.881465 38979 ssh_runner.go:362] scp /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1220 02:14:13.915284 38979 provision.go:87] duration metric: took 231.876161ms to configureAuth
I1220 02:14:13.915334 38979 buildroot.go:189] setting minikube options for container-runtime
I1220 02:14:13.915608 38979 config.go:182] Loaded profile config "false-503505": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.3
I1220 02:14:13.919150 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.919807 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:13.919851 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:13.920156 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:13.920492 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:13.920559 38979 main.go:144] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1220 02:14:14.043505 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: tmpfs
I1220 02:14:14.043553 38979 buildroot.go:70] root file system type: tmpfs
I1220 02:14:14.043717 38979 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1220 02:14:14.047676 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:14.048130 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:14.048163 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:14.048457 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:14.048704 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:14.048784 38979 main.go:144] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1220 02:14:14.192756 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1220 02:14:14.196528 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:14.197071 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:14.197103 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:14.197379 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:14.197658 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:14.197687 38979 main.go:144] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1220 02:14:15.322369 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
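[Note] The docker.service handling above is a write-if-changed pattern: the unit is written to docker.service.new, and only when it differs from the installed unit is it moved into place followed by daemon-reload, enable, and restart (here the diff fails because no unit existed yet, so the new one is installed). A local stand-alone sketch of the same idea; it runs systemctl directly instead of over SSH, and error handling is abbreviated.

package example

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
)

// installUnitIfChanged writes newUnit to path only when the content differs,
// then reloads systemd and enables/restarts the service.
func installUnitIfChanged(path string, newUnit []byte, service string) error {
	current, err := os.ReadFile(path)
	if err == nil && bytes.Equal(current, newUnit) {
		return nil // unchanged, nothing to do
	}
	if err := os.WriteFile(path, newUnit, 0o644); err != nil {
		return err
	}
	for _, args := range [][]string{
		{"daemon-reload"},
		{"enable", service},
		{"restart", service},
	} {
		if out, err := exec.Command("systemctl", args...).CombinedOutput(); err != nil {
			return fmt.Errorf("systemctl %v: %v: %s", args, err, out)
		}
	}
	return nil
}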
I1220 02:14:15.322395 38979 machine.go:97] duration metric: took 2.032605943s to provisionDockerMachine
I1220 02:14:15.322407 38979 client.go:176] duration metric: took 21.59897051s to LocalClient.Create
I1220 02:14:15.322422 38979 start.go:167] duration metric: took 21.599041943s to libmachine.API.Create "false-503505"
I1220 02:14:15.322430 38979 start.go:293] postStartSetup for "false-503505" (driver="kvm2")
I1220 02:14:15.322443 38979 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1220 02:14:15.322513 38979 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1220 02:14:15.325726 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.326187 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.326227 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.326423 38979 sshutil.go:53] new ssh client: &{IP:192.168.61.177 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/id_rsa Username:docker}
I1220 02:14:15.421695 38979 ssh_runner.go:195] Run: cat /etc/os-release
I1220 02:14:15.426952 38979 info.go:137] Remote host: Buildroot 2025.02
I1220 02:14:15.426987 38979 filesync.go:126] Scanning /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/addons for local assets ...
I1220 02:14:15.427077 38979 filesync.go:126] Scanning /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/files for local assets ...
I1220 02:14:15.427228 38979 filesync.go:149] local asset: /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/files/etc/ssl/certs/130182.pem -> 130182.pem in /etc/ssl/certs
I1220 02:14:15.427399 38979 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1220 02:14:15.440683 38979 ssh_runner.go:362] scp /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/files/etc/ssl/certs/130182.pem --> /etc/ssl/certs/130182.pem (1708 bytes)
I1220 02:14:15.472751 38979 start.go:296] duration metric: took 150.304753ms for postStartSetup
I1220 02:14:15.476375 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.476839 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.476864 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.477147 38979 profile.go:143] Saving config to /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/profiles/false-503505/config.json ...
I1220 02:14:15.477371 38979 start.go:128] duration metric: took 21.756169074s to createHost
I1220 02:14:15.480134 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.480583 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.480606 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.480814 38979 main.go:144] libmachine: Using SSH client type: native
I1220 02:14:15.481047 38979 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84dd20] 0x8509c0 <nil> [] 0s} 192.168.61.177 22 <nil> <nil>}
I1220 02:14:15.481060 38979 main.go:144] libmachine: About to run SSH command:
date +%s.%N
I1220 02:14:15.603682 38979 main.go:144] libmachine: SSH cmd err, output: <nil>: 1766196855.575822881
I1220 02:14:15.603714 38979 fix.go:216] guest clock: 1766196855.575822881
I1220 02:14:15.603726 38979 fix.go:229] Guest: 2025-12-20 02:14:15.575822881 +0000 UTC Remote: 2025-12-20 02:14:15.477389482 +0000 UTC m=+21.885083527 (delta=98.433399ms)
I1220 02:14:15.603749 38979 fix.go:200] guest clock delta is within tolerance: 98.433399ms
I1220 02:14:15.603770 38979 start.go:83] releasing machines lock for "false-503505", held for 21.882663608s
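[Note] The guest-clock lines above parse the guest's `date +%s.%N` output and compare it against the host clock, accepting the machine when the delta is within tolerance. A small parsing sketch; the function name is illustrative and the tolerance check is left to the caller.

package example

import (
	"strconv"
	"strings"
	"time"
)

// guestClockDelta parses `date +%s.%N` output (e.g. "1766196855.575822881")
// and returns the absolute difference from the local clock.
func guestClockDelta(out string) (time.Duration, error) {
	parts := strings.SplitN(strings.TrimSpace(out), ".", 2)
	sec, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return 0, err
	}
	var nsec int64
	if len(parts) == 2 {
		frac := (parts[1] + "000000000")[:9] // pad/truncate to nanoseconds
		nsec, _ = strconv.ParseInt(frac, 10, 64)
	}
	delta := time.Since(time.Unix(sec, nsec))
	if delta < 0 {
		delta = -delta
	}
	return delta, nil
}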
I1220 02:14:15.607369 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.607986 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.608024 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.608687 38979 ssh_runner.go:195] Run: cat /version.json
I1220 02:14:15.608792 38979 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1220 02:14:15.612782 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.613294 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.613342 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.613436 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.613556 38979 sshutil.go:53] new ssh client: &{IP:192.168.61.177 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/id_rsa Username:docker}
I1220 02:14:15.614074 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:15.614107 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:15.614392 38979 sshutil.go:53] new ssh client: &{IP:192.168.61.177 Port:22 SSHKeyPath:/home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/machines/false-503505/id_rsa Username:docker}
I1220 02:14:15.700660 38979 ssh_runner.go:195] Run: systemctl --version
I1220 02:14:15.725011 38979 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1220 02:14:15.731935 38979 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1220 02:14:15.732099 38979 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *bridge* -not -name *podman* -not -name *.mk_disabled -printf "%p, " -exec sh -c "sudo sed -i -r -e '/"dst": ".*:.*"/d' -e 's|^(.*)"dst": (.*)[,*]$|\1"dst": \2|g' -e '/"subnet": ".*:.*"/d' -e 's|^(.*)"subnet": ".*"(.*)[,*]$|\1"subnet": "10.244.0.0/16"\2|g' {}" ;
I1220 02:14:15.744444 38979 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *podman* -not -name *.mk_disabled -printf "%p, " -exec sh -c "sudo sed -i -r -e 's|^(.*)"subnet": ".*"(.*)$|\1"subnet": "10.244.0.0/16"\2|g' -e 's|^(.*)"gateway": ".*"(.*)$|\1"gateway": "10.244.0.1"\2|g' {}" ;
I1220 02:14:15.768292 38979 cni.go:308] configured [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1220 02:14:15.768338 38979 start.go:496] detecting cgroup driver to use...
I1220 02:14:15.768490 38979 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1220 02:14:15.808234 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1220 02:14:15.830328 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1220 02:14:15.848439 38979 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1220 02:14:15.848537 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1220 02:14:15.865682 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1220 02:14:15.887500 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1220 02:14:15.906005 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1220 02:14:15.925461 38979 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1220 02:14:15.940692 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1220 02:14:15.959326 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1220 02:14:15.978291 38979 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1220 02:14:15.997878 38979 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1220 02:14:16.014027 38979 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1220 02:14:16.014121 38979 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1220 02:14:16.033465 38979 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
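[Note] The netfilter steps above first probe the bridge sysctl (which fails because br_netfilter is not loaded), then load the module and enable IPv4 forwarding. A local sketch of that sequence, assuming root; the real code runs the equivalent commands over SSH inside the guest.

package example

import (
	"fmt"
	"os"
	"os/exec"
)

// enableBridgeNetfilter loads br_netfilter when the bridge sysctl is absent,
// then turns on IPv4 forwarding, matching the modprobe/echo commands above.
func enableBridgeNetfilter() error {
	if _, err := os.Stat("/proc/sys/net/bridge/bridge-nf-call-iptables"); err != nil {
		if out, merr := exec.Command("modprobe", "br_netfilter").CombinedOutput(); merr != nil {
			return fmt.Errorf("modprobe br_netfilter: %v: %s", merr, out)
		}
	}
	return os.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte("1\n"), 0o644)
}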
I1220 02:14:16.050354 38979 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:16.231792 38979 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1220 02:14:16.289416 38979 start.go:496] detecting cgroup driver to use...
I1220 02:14:16.289528 38979 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1220 02:14:16.314852 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1220 02:14:16.343915 38979 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1220 02:14:16.373499 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1220 02:14:16.393749 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1220 02:14:16.415218 38979 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1220 02:14:16.448678 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1220 02:14:16.471638 38979 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1220 02:14:16.499850 38979 ssh_runner.go:195] Run: which cri-dockerd
I1220 02:14:16.505358 38979 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1220 02:14:16.518773 38979 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1220 02:14:16.542267 38979 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1220 02:14:16.744157 38979 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1220 02:14:16.924495 38979 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1220 02:14:16.924658 38979 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1220 02:14:16.953858 38979 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1220 02:14:16.973889 38979 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:17.180489 38979 ssh_runner.go:195] Run: sudo systemctl restart docker
I1220 02:14:17.720891 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1220 02:14:17.740432 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1220 02:14:17.756728 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1220 02:14:17.780803 38979 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1220 02:14:17.958835 38979 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1220 02:14:18.121422 38979 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:18.283915 38979 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1220 02:14:18.319068 38979 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1220 02:14:18.334630 38979 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:18.486080 38979 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1220 02:14:18.616715 38979 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1220 02:14:18.643324 38979 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1220 02:14:18.643397 38979 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1220 02:14:18.649921 38979 start.go:564] Will wait 60s for crictl version
I1220 02:14:18.649987 38979 ssh_runner.go:195] Run: which crictl
I1220 02:14:18.655062 38979 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1220 02:14:18.692451 38979 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.2
RuntimeApiVersion: v1
I1220 02:14:18.692517 38979 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1220 02:14:18.725655 38979 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1220 02:14:21.464469 37762 system_pods.go:86] 9 kube-system pods found
I1220 02:14:21.464520 37762 system_pods.go:89] "calico-kube-controllers-5c676f698c-5plhl" [8be42fe3-d58c-4bbf-9c51-8b689e28f671] Pending / Ready:ContainersNotReady (containers with unready status: [calico-kube-controllers]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-kube-controllers])
I1220 02:14:21.464535 37762 system_pods.go:89] "calico-node-d2xvk" [ebf1dd8e-ce6b-4cf7-a858-755e70edd320] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
I1220 02:14:21.464548 37762 system_pods.go:89] "coredns-66bc5c9577-hd2kg" [6f327eef-5be3-4358-8bf0-be3e0e9a13f1] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:21.464554 37762 system_pods.go:89] "etcd-calico-503505" [7fed5e30-e972-43f8-9ef7-7261b7216d7c] Running
I1220 02:14:21.464561 37762 system_pods.go:89] "kube-apiserver-calico-503505" [b81cb7dd-efa9-428d-b155-4c8d4fcb5566] Running
I1220 02:14:21.464567 37762 system_pods.go:89] "kube-controller-manager-calico-503505" [4a7a0d88-4103-46e7-8090-72b0e9d91c39] Running
I1220 02:14:21.464575 37762 system_pods.go:89] "kube-proxy-gzr82" [6a0b0ea5-98c0-4762-9423-1c10dee4576e] Running
I1220 02:14:21.464580 37762 system_pods.go:89] "kube-scheduler-calico-503505" [ecad390b-bd72-4f11-82fd-6544060a23c7] Running
I1220 02:14:21.464585 37762 system_pods.go:89] "storage-provisioner" [742d00f3-5d72-488a-afa1-1fcd40398cf6] Running
I1220 02:14:21.464605 37762 retry.go:31] will retry after 4.407665546s: missing components: kube-dns
W1220 02:14:18.670754 37878 node_ready.go:57] node "custom-flannel-503505" has "Ready":"False" status (will retry)
I1220 02:14:20.670484 37878 node_ready.go:49] node "custom-flannel-503505" is "Ready"
I1220 02:14:20.670541 37878 node_ready.go:38] duration metric: took 9.00398985s for node "custom-flannel-503505" to be "Ready" ...
I1220 02:14:20.670564 37878 api_server.go:52] waiting for apiserver process to appear ...
I1220 02:14:20.670694 37878 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1220 02:14:20.707244 37878 api_server.go:72] duration metric: took 9.939328273s to wait for apiserver process to appear ...
I1220 02:14:20.707279 37878 api_server.go:88] waiting for apiserver healthz status ...
I1220 02:14:20.707301 37878 api_server.go:253] Checking apiserver healthz at https://192.168.72.110:8443/healthz ...
I1220 02:14:20.717863 37878 api_server.go:279] https://192.168.72.110:8443/healthz returned 200:
ok
I1220 02:14:20.719853 37878 api_server.go:141] control plane version: v1.34.3
I1220 02:14:20.719883 37878 api_server.go:131] duration metric: took 12.596477ms to wait for apiserver health ...
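[Note] The healthz probe above is an HTTPS GET against the apiserver that expects a 200 response with body "ok". A minimal sketch; a real client would present the profile's client certificates and CA instead of skipping TLS verification, and the port is assumed to be 8443 as in the log.

package example

import (
	"crypto/tls"
	"io"
	"net/http"
	"strings"
	"time"
)

// apiserverHealthy reports whether https://<host>:8443/healthz answers 200 "ok".
// InsecureSkipVerify keeps the sketch short; do not use it outside an example.
func apiserverHealthy(host string) bool {
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get("https://" + host + ":8443/healthz")
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	return resp.StatusCode == http.StatusOK && strings.TrimSpace(string(body)) == "ok"
}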
I1220 02:14:20.719893 37878 system_pods.go:43] waiting for kube-system pods to appear ...
I1220 02:14:20.726999 37878 system_pods.go:59] 7 kube-system pods found
I1220 02:14:20.727077 37878 system_pods.go:61] "coredns-66bc5c9577-sqfn8" [5049fdb9-e7ad-4399-81f2-09401dc596ee] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:20.727090 37878 system_pods.go:61] "etcd-custom-flannel-503505" [f4d689e5-b742-43fc-9410-5bd64799d7ca] Running
I1220 02:14:20.727099 37878 system_pods.go:61] "kube-apiserver-custom-flannel-503505" [9edfcf3a-ac6f-45c4-85e3-989d63d60395] Running
I1220 02:14:20.727106 37878 system_pods.go:61] "kube-controller-manager-custom-flannel-503505" [2afe8125-f900-4f10-ac33-2aa361fb7c20] Running
I1220 02:14:20.727112 37878 system_pods.go:61] "kube-proxy-9kg7f" [5ca03971-c23b-486e-9469-cbff81fb30de] Running
I1220 02:14:20.727121 37878 system_pods.go:61] "kube-scheduler-custom-flannel-503505" [c4dcfa40-9627-4319-9f90-443f6964a9ec] Running
I1220 02:14:20.727128 37878 system_pods.go:61] "storage-provisioner" [412afff2-e1a9-4433-8599-0976c8111dbe] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:20.727138 37878 system_pods.go:74] duration metric: took 7.237909ms to wait for pod list to return data ...
I1220 02:14:20.727152 37878 default_sa.go:34] waiting for default service account to be created ...
I1220 02:14:20.738383 37878 default_sa.go:45] found service account: "default"
I1220 02:14:20.738419 37878 default_sa.go:55] duration metric: took 11.258578ms for default service account to be created ...
I1220 02:14:20.738431 37878 system_pods.go:116] waiting for k8s-apps to be running ...
I1220 02:14:20.755625 37878 system_pods.go:86] 7 kube-system pods found
I1220 02:14:20.755705 37878 system_pods.go:89] "coredns-66bc5c9577-sqfn8" [5049fdb9-e7ad-4399-81f2-09401dc596ee] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:20.755715 37878 system_pods.go:89] "etcd-custom-flannel-503505" [f4d689e5-b742-43fc-9410-5bd64799d7ca] Running
I1220 02:14:20.755743 37878 system_pods.go:89] "kube-apiserver-custom-flannel-503505" [9edfcf3a-ac6f-45c4-85e3-989d63d60395] Running
I1220 02:14:20.755825 37878 system_pods.go:89] "kube-controller-manager-custom-flannel-503505" [2afe8125-f900-4f10-ac33-2aa361fb7c20] Running
I1220 02:14:20.755884 37878 system_pods.go:89] "kube-proxy-9kg7f" [5ca03971-c23b-486e-9469-cbff81fb30de] Running
I1220 02:14:20.755901 37878 system_pods.go:89] "kube-scheduler-custom-flannel-503505" [c4dcfa40-9627-4319-9f90-443f6964a9ec] Running
I1220 02:14:20.755939 37878 system_pods.go:89] "storage-provisioner" [412afff2-e1a9-4433-8599-0976c8111dbe] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:20.755969 37878 retry.go:31] will retry after 243.64974ms: missing components: kube-dns
I1220 02:14:21.008936 37878 system_pods.go:86] 7 kube-system pods found
I1220 02:14:21.008998 37878 system_pods.go:89] "coredns-66bc5c9577-sqfn8" [5049fdb9-e7ad-4399-81f2-09401dc596ee] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:21.009009 37878 system_pods.go:89] "etcd-custom-flannel-503505" [f4d689e5-b742-43fc-9410-5bd64799d7ca] Running
I1220 02:14:21.009018 37878 system_pods.go:89] "kube-apiserver-custom-flannel-503505" [9edfcf3a-ac6f-45c4-85e3-989d63d60395] Running
I1220 02:14:21.009025 37878 system_pods.go:89] "kube-controller-manager-custom-flannel-503505" [2afe8125-f900-4f10-ac33-2aa361fb7c20] Running
I1220 02:14:21.009033 37878 system_pods.go:89] "kube-proxy-9kg7f" [5ca03971-c23b-486e-9469-cbff81fb30de] Running
I1220 02:14:21.009041 37878 system_pods.go:89] "kube-scheduler-custom-flannel-503505" [c4dcfa40-9627-4319-9f90-443f6964a9ec] Running
I1220 02:14:21.009076 37878 system_pods.go:89] "storage-provisioner" [412afff2-e1a9-4433-8599-0976c8111dbe] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:21.009102 37878 retry.go:31] will retry after 302.021984ms: missing components: kube-dns
I1220 02:14:21.324004 37878 system_pods.go:86] 7 kube-system pods found
I1220 02:14:21.324091 37878 system_pods.go:89] "coredns-66bc5c9577-sqfn8" [5049fdb9-e7ad-4399-81f2-09401dc596ee] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:21.324103 37878 system_pods.go:89] "etcd-custom-flannel-503505" [f4d689e5-b742-43fc-9410-5bd64799d7ca] Running
I1220 02:14:21.324111 37878 system_pods.go:89] "kube-apiserver-custom-flannel-503505" [9edfcf3a-ac6f-45c4-85e3-989d63d60395] Running
I1220 02:14:21.324118 37878 system_pods.go:89] "kube-controller-manager-custom-flannel-503505" [2afe8125-f900-4f10-ac33-2aa361fb7c20] Running
I1220 02:14:21.324136 37878 system_pods.go:89] "kube-proxy-9kg7f" [5ca03971-c23b-486e-9469-cbff81fb30de] Running
I1220 02:14:21.324144 37878 system_pods.go:89] "kube-scheduler-custom-flannel-503505" [c4dcfa40-9627-4319-9f90-443f6964a9ec] Running
I1220 02:14:21.324156 37878 system_pods.go:89] "storage-provisioner" [412afff2-e1a9-4433-8599-0976c8111dbe] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:21.324175 37878 retry.go:31] will retry after 335.232555ms: missing components: kube-dns
I1220 02:14:21.666742 37878 system_pods.go:86] 7 kube-system pods found
I1220 02:14:21.666783 37878 system_pods.go:89] "coredns-66bc5c9577-sqfn8" [5049fdb9-e7ad-4399-81f2-09401dc596ee] Pending / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1220 02:14:21.666790 37878 system_pods.go:89] "etcd-custom-flannel-503505" [f4d689e5-b742-43fc-9410-5bd64799d7ca] Running
I1220 02:14:21.666796 37878 system_pods.go:89] "kube-apiserver-custom-flannel-503505" [9edfcf3a-ac6f-45c4-85e3-989d63d60395] Running
I1220 02:14:21.666800 37878 system_pods.go:89] "kube-controller-manager-custom-flannel-503505" [2afe8125-f900-4f10-ac33-2aa361fb7c20] Running
I1220 02:14:21.666804 37878 system_pods.go:89] "kube-proxy-9kg7f" [5ca03971-c23b-486e-9469-cbff81fb30de] Running
I1220 02:14:21.666807 37878 system_pods.go:89] "kube-scheduler-custom-flannel-503505" [c4dcfa40-9627-4319-9f90-443f6964a9ec] Running
I1220 02:14:21.666811 37878 system_pods.go:89] "storage-provisioner" [412afff2-e1a9-4433-8599-0976c8111dbe] Pending / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1220 02:14:21.666827 37878 retry.go:31] will retry after 489.261855ms: missing components: kube-dns
I1220 02:14:22.160484 37878 system_pods.go:86] 7 kube-system pods found
I1220 02:14:22.160513 37878 system_pods.go:89] "coredns-66bc5c9577-sqfn8" [5049fdb9-e7ad-4399-81f2-09401dc596ee] Running
I1220 02:14:22.160519 37878 system_pods.go:89] "etcd-custom-flannel-503505" [f4d689e5-b742-43fc-9410-5bd64799d7ca] Running
I1220 02:14:22.160523 37878 system_pods.go:89] "kube-apiserver-custom-flannel-503505" [9edfcf3a-ac6f-45c4-85e3-989d63d60395] Running
I1220 02:14:22.160527 37878 system_pods.go:89] "kube-controller-manager-custom-flannel-503505" [2afe8125-f900-4f10-ac33-2aa361fb7c20] Running
I1220 02:14:22.160530 37878 system_pods.go:89] "kube-proxy-9kg7f" [5ca03971-c23b-486e-9469-cbff81fb30de] Running
I1220 02:14:22.160533 37878 system_pods.go:89] "kube-scheduler-custom-flannel-503505" [c4dcfa40-9627-4319-9f90-443f6964a9ec] Running
I1220 02:14:22.160536 37878 system_pods.go:89] "storage-provisioner" [412afff2-e1a9-4433-8599-0976c8111dbe] Running
I1220 02:14:22.160543 37878 system_pods.go:126] duration metric: took 1.422105753s to wait for k8s-apps to be running ...
I1220 02:14:22.160550 37878 system_svc.go:44] waiting for kubelet service to be running ....
I1220 02:14:22.160597 37878 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1220 02:14:22.178117 37878 system_svc.go:56] duration metric: took 17.557366ms WaitForService to wait for kubelet
I1220 02:14:22.178151 37878 kubeadm.go:587] duration metric: took 11.410240532s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1220 02:14:22.178236 37878 node_conditions.go:102] verifying NodePressure condition ...
I1220 02:14:22.182122 37878 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1220 02:14:22.182155 37878 node_conditions.go:123] node cpu capacity is 2
I1220 02:14:22.182173 37878 node_conditions.go:105] duration metric: took 3.930704ms to run NodePressure ...
I1220 02:14:22.182187 37878 start.go:242] waiting for startup goroutines ...
I1220 02:14:22.182208 37878 start.go:247] waiting for cluster config update ...
I1220 02:14:22.182225 37878 start.go:256] writing updated cluster config ...
I1220 02:14:22.184970 37878 ssh_runner.go:195] Run: rm -f paused
I1220 02:14:22.191086 37878 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1220 02:14:22.195279 37878 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-sqfn8" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:22.199785 37878 pod_ready.go:94] pod "coredns-66bc5c9577-sqfn8" is "Ready"
I1220 02:14:22.199808 37878 pod_ready.go:86] duration metric: took 4.507285ms for pod "coredns-66bc5c9577-sqfn8" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:22.202062 37878 pod_ready.go:83] waiting for pod "etcd-custom-flannel-503505" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:22.207096 37878 pod_ready.go:94] pod "etcd-custom-flannel-503505" is "Ready"
I1220 02:14:22.207127 37878 pod_ready.go:86] duration metric: took 5.03686ms for pod "etcd-custom-flannel-503505" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:22.209865 37878 pod_ready.go:83] waiting for pod "kube-apiserver-custom-flannel-503505" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:22.214503 37878 pod_ready.go:94] pod "kube-apiserver-custom-flannel-503505" is "Ready"
I1220 02:14:22.214540 37878 pod_ready.go:86] duration metric: took 4.645938ms for pod "kube-apiserver-custom-flannel-503505" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:22.217066 37878 pod_ready.go:83] waiting for pod "kube-controller-manager-custom-flannel-503505" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:22.755631 37878 pod_ready.go:94] pod "kube-controller-manager-custom-flannel-503505" is "Ready"
I1220 02:14:22.755662 37878 pod_ready.go:86] duration metric: took 538.56085ms for pod "kube-controller-manager-custom-flannel-503505" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:22.930705 37878 pod_ready.go:83] waiting for pod "kube-proxy-9kg7f" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:18.758400 38979 out.go:252] * Preparing Kubernetes v1.34.3 on Docker 28.5.2 ...
I1220 02:14:18.761850 38979 main.go:144] libmachine: domain false-503505 has defined MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:18.762426 38979 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4e:1e:41", ip: ""} in network mk-false-503505: {Iface:virbr3 ExpiryTime:2025-12-20 03:14:09 +0000 UTC Type:0 Mac:52:54:00:4e:1e:41 Iaid: IPaddr:192.168.61.177 Prefix:24 Hostname:false-503505 Clientid:01:52:54:00:4e:1e:41}
I1220 02:14:18.762460 38979 main.go:144] libmachine: domain false-503505 has defined IP address 192.168.61.177 and MAC address 52:54:00:4e:1e:41 in network mk-false-503505
I1220 02:14:18.762677 38979 ssh_runner.go:195] Run: grep 192.168.61.1 host.minikube.internal$ /etc/hosts
I1220 02:14:18.767521 38979 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.61.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1220 02:14:18.787918 38979 kubeadm.go:884] updating cluster {Name:false-503505 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22186/minikube-v1.37.0-1765965980-22186-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765966054-22186@sha256:1c173489767e6632c410d2554f1a2272f032a423dd528157e201daadfe3c43f0 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.3
ClusterName:false-503505 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:false} Nodes:[{Name: IP:192.168.61.177 Port:8443 KubernetesVersion:v1.34.3 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:15m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror
: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1220 02:14:18.788080 38979 preload.go:188] Checking if preload exists for k8s version v1.34.3 and runtime docker
I1220 02:14:18.788143 38979 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1220 02:14:18.811874 38979 docker.go:691] Got preloaded images:
I1220 02:14:18.811902 38979 docker.go:697] registry.k8s.io/kube-apiserver:v1.34.3 wasn't preloaded
I1220 02:14:18.811987 38979 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
I1220 02:14:18.825027 38979 ssh_runner.go:195] Run: which lz4
I1220 02:14:18.831228 38979 ssh_runner.go:195] Run: stat -c "%s %y" /preloaded.tar.lz4
I1220 02:14:18.836810 38979 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%s %y" /preloaded.tar.lz4: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/preloaded.tar.lz4': No such file or directory
I1220 02:14:18.836859 38979 ssh_runner.go:362] scp /home/minitest/minikube-integration/7cd9f41b7421760cf1f1eaa8725bdb975037b06d-7160/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.3-docker-overlay2-amd64.tar.lz4 --> /preloaded.tar.lz4 (284304868 bytes)
I1220 02:14:20.026096 38979 docker.go:655] duration metric: took 1.194964371s to copy over tarball
I1220 02:14:20.026187 38979 ssh_runner.go:195] Run: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4
I1220 02:14:21.663754 38979 ssh_runner.go:235] Completed: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4: (1.637534144s)
I1220 02:14:21.663796 38979 ssh_runner.go:146] rm: /preloaded.tar.lz4
I1220 02:14:21.714741 38979 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
I1220 02:14:21.728590 38979 ssh_runner.go:362] scp memory --> /var/lib/docker/image/overlay2/repositories.json (2632 bytes)
I1220 02:14:21.753302 38979 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1220 02:14:21.775766 38979 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1220 02:14:21.965281 38979 ssh_runner.go:195] Run: sudo systemctl restart docker
I1220 02:14:23.196123 37878 pod_ready.go:94] pod "kube-proxy-9kg7f" is "Ready"
I1220 02:14:23.196152 37878 pod_ready.go:86] duration metric: took 265.420415ms for pod "kube-proxy-9kg7f" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:23.395371 37878 pod_ready.go:83] waiting for pod "kube-scheduler-custom-flannel-503505" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:23.797357 37878 pod_ready.go:94] pod "kube-scheduler-custom-flannel-503505" is "Ready"
I1220 02:14:23.797395 37878 pod_ready.go:86] duration metric: took 401.98809ms for pod "kube-scheduler-custom-flannel-503505" in "kube-system" namespace to be "Ready" or be gone ...
I1220 02:14:23.797413 37878 pod_ready.go:40] duration metric: took 1.606275744s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1220 02:14:23.864427 37878 start.go:625] kubectl: 1.35.0, cluster: 1.34.3 (minor skew: 1)
I1220 02:14:23.865897 37878 out.go:179] * Done! kubectl is now configured to use "custom-flannel-503505" cluster and "default" namespace by default
==> Docker <==
Dec 20 02:13:25 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:13:25Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/24384c9b6386768f183a17a14b0915b4c06115ceca79b379c9a8caeb87ac9be2/resolv.conf as [nameserver 10.96.0.10 search kubernetes-dashboard.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Dec 20 02:13:26 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:26.077359482Z" level=warning msg="reference for unknown type: " digest="sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93" remote="docker.io/kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93"
Dec 20 02:13:33 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:13:33Z" level=info msg="Stop pulling image docker.io/kubernetesui/dashboard:v2.7.0@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93: Status: Downloaded newer image for kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.166995649Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.247637303Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.247742747Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Dec 20 02:13:33 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:13:33Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.870943978Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.870972001Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.874954248Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Dec 20 02:13:33 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:33.875104860Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 20 02:13:46 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:46.013938388Z" level=error msg="Handler for POST /v1.51/containers/e389ed009c41/pause returned error: cannot pause container e389ed009c414813f08a16331049a1f7b81ae99102e1d3eee00456652f70d78e: OCI runtime pause failed: container not running"
Dec 20 02:13:46 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:13:46.096234565Z" level=info msg="ignoring event" container=e389ed009c414813f08a16331049a1f7b81ae99102e1d3eee00456652f70d78e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 20 02:14:19 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:14:19Z" level=error msg="error getting RW layer size for container ID 'f14a7d35a9c218a36064019d8d70cd5e2dc10c8fff7e745b9c07943ea6e37833': Error response from daemon: No such container: f14a7d35a9c218a36064019d8d70cd5e2dc10c8fff7e745b9c07943ea6e37833"
Dec 20 02:14:19 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:14:19Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'f14a7d35a9c218a36064019d8d70cd5e2dc10c8fff7e745b9c07943ea6e37833'"
Dec 20 02:14:20 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:14:20Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-j9fnc_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"c17f03aae9a804c2000dd7a7f2df0a5c0e11cb7cc45d2898ceeb917e335ab8a6\""
Dec 20 02:14:20 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:14:20Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.054814750Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.173663258Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.173805989Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Dec 20 02:14:21 default-k8s-diff-port-032958 cri-dockerd[1567]: time="2025-12-20T02:14:21Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.210054061Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.210106510Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.216155700Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Dec 20 02:14:21 default-k8s-diff-port-032958 dockerd[1117]: time="2025-12-20T02:14:21.216230216Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
db82439a82773 6e38f40d628db 5 seconds ago Running storage-provisioner 2 b98cac4df9b58 storage-provisioner kube-system
3d0dc5e4eaf53 kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93 52 seconds ago Running kubernetes-dashboard 0 c7214caee965e kubernetes-dashboard-855c9754f9-v5f62 kubernetes-dashboard
bd3af300e51d6 56cc512116c8f About a minute ago Running busybox 1 620275c9345e0 busybox default
c9a7560c3855f 52546a367cc9e About a minute ago Running coredns 1 bd05cab39e53f coredns-66bc5c9577-gjmjk kube-system
e389ed009c414 6e38f40d628db About a minute ago Exited storage-provisioner 1 b98cac4df9b58 storage-provisioner kube-system
8a1598184096c 36eef8e07bdd6 About a minute ago Running kube-proxy 1 fceaaba1c1db3 kube-proxy-22tlj kube-system
2808d78b661f8 aec12dadf56dd About a minute ago Running kube-scheduler 1 6d3fddf7afe4b kube-scheduler-default-k8s-diff-port-032958 kube-system
5d487135b34c5 a3e246e9556e9 About a minute ago Running etcd 1 57ad4b77ed607 etcd-default-k8s-diff-port-032958 kube-system
0be7d44211125 5826b25d990d7 About a minute ago Running kube-controller-manager 1 f7e02a8a528fa kube-controller-manager-default-k8s-diff-port-032958 kube-system
799ae6e77e4dc aa27095f56193 About a minute ago Running kube-apiserver 1 c0277aff9f306 kube-apiserver-default-k8s-diff-port-032958 kube-system
9a4671ba050b2 gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 2 minutes ago Exited busybox 0 9bfc558dcff48 busybox default
aef0cd5a3775d 52546a367cc9e 2 minutes ago Exited coredns 0 4e8574a6b885b coredns-66bc5c9577-gjmjk kube-system
696c72bae65f2 36eef8e07bdd6 2 minutes ago Exited kube-proxy 0 959487a2071a7 kube-proxy-22tlj kube-system
37cee352777b9 aa27095f56193 3 minutes ago Exited kube-apiserver 0 042ea7540f943 kube-apiserver-default-k8s-diff-port-032958 kube-system
6955eb7dbb7a8 a3e246e9556e9 3 minutes ago Exited etcd 0 1ae4fd44c2900 etcd-default-k8s-diff-port-032958 kube-system
bc3e91d6c19d6 5826b25d990d7 3 minutes ago Exited kube-controller-manager 0 ec2c7b618f7f7 kube-controller-manager-default-k8s-diff-port-032958 kube-system
44fb178dfab72 aec12dadf56dd 3 minutes ago Exited kube-scheduler 0 010ff1a843791 kube-scheduler-default-k8s-diff-port-032958 kube-system
==> coredns [aef0cd5a3775] <==
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] Reloading
[INFO] plugin/reload: Running configuration SHA512 = ecad3ac8c72227dcf0d7a418ea5051ee155dd74d241a13c4787cc61906568517b5647c8519c78ef2c6b724422ee4b03d6cfb27e9a87140163726e83184faf782
[INFO] Reloading complete
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [c9a7560c3855] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = ecad3ac8c72227dcf0d7a418ea5051ee155dd74d241a13c4787cc61906568517b5647c8519c78ef2c6b724422ee4b03d6cfb27e9a87140163726e83184faf782
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:39548 - 58159 "HINFO IN 6794078486954714189.4770737732440681574. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.045655293s
==> describe nodes <==
Name: default-k8s-diff-port-032958
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=default-k8s-diff-port-032958
kubernetes.io/os=linux
minikube.k8s.io/commit=7cd9f41b7421760cf1f1eaa8725bdb975037b06d
minikube.k8s.io/name=default-k8s-diff-port-032958
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_12_20T02_11_24_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 20 Dec 2025 02:11:20 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: default-k8s-diff-port-032958
AcquireTime: <unset>
RenewTime: Sat, 20 Dec 2025 02:14:19 +0000
Conditions:
Type              Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
----              ------  -----------------                 ------------------                ------                      -------
MemoryPressure    False   Sat, 20 Dec 2025 02:14:20 +0000   Sat, 20 Dec 2025 02:11:18 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
DiskPressure      False   Sat, 20 Dec 2025 02:14:20 +0000   Sat, 20 Dec 2025 02:11:18 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
PIDPressure       False   Sat, 20 Dec 2025 02:14:20 +0000   Sat, 20 Dec 2025 02:11:18 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
Ready             True    Sat, 20 Dec 2025 02:14:20 +0000   Sat, 20 Dec 2025 02:13:24 +0000   KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.83.139
Hostname: default-k8s-diff-port-032958
Capacity:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
System Info:
Machine ID: a22ece73f0a74620b511d2c9063270d7
System UUID: a22ece73-f0a7-4620-b511-d2c9063270d7
Boot ID: 3a1ecf6e-4165-4ac3-94cb-43972902c57c
Kernel Version: 6.6.95
OS Image: Buildroot 2025.02
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.5.2
Kubelet Version: v1.34.3
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace             Name                                                   CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------             ----                                                   ------------  ----------  ---------------  -------------  ---
default               busybox                                                0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m21s
kube-system           coredns-66bc5c9577-gjmjk                               100m (5%)     0 (0%)      70Mi (2%)        170Mi (5%)     2m57s
kube-system           etcd-default-k8s-diff-port-032958                      100m (5%)     0 (0%)      100Mi (3%)       0 (0%)         3m1s
kube-system           kube-apiserver-default-k8s-diff-port-032958            250m (12%)    0 (0%)      0 (0%)           0 (0%)         3m2s
kube-system           kube-controller-manager-default-k8s-diff-port-032958   200m (10%)    0 (0%)      0 (0%)           0 (0%)         3m1s
kube-system           kube-proxy-22tlj                                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m57s
kube-system           kube-scheduler-default-k8s-diff-port-032958            100m (5%)     0 (0%)      0 (0%)           0 (0%)         3m1s
kube-system           metrics-server-746fcd58dc-r9hzl                        100m (5%)     0 (0%)      200Mi (6%)       0 (0%)         2m11s
kube-system           storage-provisioner                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m54s
kubernetes-dashboard  dashboard-metrics-scraper-6ffb444bf9-wzcc7             0 (0%)        0 (0%)      0 (0%)           0 (0%)         66s
kubernetes-dashboard  kubernetes-dashboard-855c9754f9-v5f62                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         66s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests     Limits
--------           --------     ------
cpu                850m (42%)   0 (0%)
memory             370Mi (12%)  170Mi (5%)
ephemeral-storage  0 (0%)       0 (0%)
hugepages-2Mi      0 (0%)       0 (0%)
Events:
Type     Reason                   Age                  From             Message
----     ------                   ----                 ----             -------
Normal   Starting                 2m54s                kube-proxy
Normal   Starting                 69s                  kube-proxy
Normal   NodeAllocatableEnforced  3m9s                 kubelet          Updated Node Allocatable limit across pods
Normal   NodeHasSufficientMemory  3m9s (x8 over 3m9s)  kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure    3m9s (x8 over 3m9s)  kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID     3m9s (x7 over 3m9s)  kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasSufficientPID
Normal   Starting                 3m9s                 kubelet          Starting kubelet.
Normal   Starting                 3m2s                 kubelet          Starting kubelet.
Normal   NodeAllocatableEnforced  3m2s                 kubelet          Updated Node Allocatable limit across pods
Normal   NodeHasSufficientMemory  3m1s                 kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure    3m1s                 kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID     3m1s                 kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasSufficientPID
Normal   RegisteredNode           2m58s                node-controller  Node default-k8s-diff-port-032958 event: Registered Node default-k8s-diff-port-032958 in Controller
Normal   NodeReady                2m57s                kubelet          Node default-k8s-diff-port-032958 status is now: NodeReady
Normal   Starting                 76s                  kubelet          Starting kubelet.
Normal   NodeHasSufficientMemory  76s (x8 over 76s)    kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure    76s (x8 over 76s)    kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID     76s (x7 over 76s)    kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasSufficientPID
Normal   NodeAllocatableEnforced  76s                  kubelet          Updated Node Allocatable limit across pods
Warning  Rebooted                 71s                  kubelet          Node default-k8s-diff-port-032958 has been rebooted, boot id: 3a1ecf6e-4165-4ac3-94cb-43972902c57c
Normal   RegisteredNode           67s                  node-controller  Node default-k8s-diff-port-032958 event: Registered Node default-k8s-diff-port-032958 in Controller
Normal   Starting                 6s                   kubelet          Starting kubelet.
Normal   NodeAllocatableEnforced  5s                   kubelet          Updated Node Allocatable limit across pods
Normal   NodeHasSufficientMemory  5s                   kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure    5s                   kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID     5s                   kubelet          Node default-k8s-diff-port-032958 status is now: NodeHasSufficientPID
==> dmesg <==
[Dec20 02:12] Booted with the nomodeset parameter. Only the system framebuffer will be available
[ +0.000011] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.000038] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +0.003240] (rpcbind)[120]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
[ +0.994669] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
[ +0.000027] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
[ +0.000002] NFSD: Unable to initialize client recovery tracking! (-2)
[ +0.151734] kauditd_printk_skb: 1 callbacks suppressed
[Dec20 02:13] kauditd_printk_skb: 393 callbacks suppressed
[ +0.106540] kauditd_printk_skb: 46 callbacks suppressed
[ +5.723521] kauditd_printk_skb: 165 callbacks suppressed
[ +3.591601] kauditd_printk_skb: 134 callbacks suppressed
[ +0.607561] kauditd_printk_skb: 259 callbacks suppressed
[ +0.307946] kauditd_printk_skb: 17 callbacks suppressed
[Dec20 02:14] kauditd_printk_skb: 35 callbacks suppressed
[ +5.244764] kauditd_printk_skb: 33 callbacks suppressed
==> etcd [5d487135b34c] <==
{"level":"warn","ts":"2025-12-20T02:13:13.342109Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50660","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.352027Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50684","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.369107Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50704","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.385882Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50724","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.394142Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50748","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.401109Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50766","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.409984Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50786","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.418468Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50796","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.428572Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50810","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.434912Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50852","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.445522Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50868","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.454332Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50884","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.465435Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50906","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.483111Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50914","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.490648Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50946","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.499438Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50956","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:13.572659Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:50980","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:13:29.160598Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"521.564224ms","expected-duration":"100ms","prefix":"","request":"header:<ID:13052446816451747392 > lease_revoke:<id:35239b39868e0a7a>","response":"size:28"}
{"level":"info","ts":"2025-12-20T02:13:29.161474Z","caller":"traceutil/trace.go:172","msg":"trace[1265080339] linearizableReadLoop","detail":"{readStateIndex:772; appliedIndex:771; }","duration":"411.486582ms","start":"2025-12-20T02:13:28.749972Z","end":"2025-12-20T02:13:29.161458Z","steps":["trace[1265080339] 'read index received' (duration: 33.594µs)","trace[1265080339] 'applied index is now lower than readState.Index' (duration: 411.451844ms)"],"step_count":2}
{"level":"warn","ts":"2025-12-20T02:13:29.161591Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"411.631732ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-12-20T02:13:29.161613Z","caller":"traceutil/trace.go:172","msg":"trace[1600534378] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:727; }","duration":"411.662139ms","start":"2025-12-20T02:13:28.749943Z","end":"2025-12-20T02:13:29.161605Z","steps":["trace[1600534378] 'agreement among raft nodes before linearized reading' (duration: 411.61436ms)"],"step_count":1}
{"level":"warn","ts":"2025-12-20T02:13:29.162719Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"311.7284ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods/kube-system/kube-scheduler-default-k8s-diff-port-032958\" limit:1 ","response":"range_response_count:1 size:5168"}
{"level":"info","ts":"2025-12-20T02:13:29.163046Z","caller":"traceutil/trace.go:172","msg":"trace[1971843700] range","detail":"{range_begin:/registry/pods/kube-system/kube-scheduler-default-k8s-diff-port-032958; range_end:; response_count:1; response_revision:727; }","duration":"312.095123ms","start":"2025-12-20T02:13:28.850939Z","end":"2025-12-20T02:13:29.163034Z","steps":["trace[1971843700] 'agreement among raft nodes before linearized reading' (duration: 311.117462ms)"],"step_count":1}
{"level":"warn","ts":"2025-12-20T02:13:29.163083Z","caller":"v3rpc/interceptor.go:202","msg":"request stats","start time":"2025-12-20T02:13:28.850904Z","time spent":"312.166241ms","remote":"127.0.0.1:50178","response type":"/etcdserverpb.KV/Range","request count":0,"request size":74,"response count":1,"response size":5191,"request content":"key:\"/registry/pods/kube-system/kube-scheduler-default-k8s-diff-port-032958\" limit:1 "}
{"level":"info","ts":"2025-12-20T02:13:30.222290Z","caller":"traceutil/trace.go:172","msg":"trace[252289306] transaction","detail":"{read_only:false; response_revision:728; number_of_response:1; }","duration":"269.402974ms","start":"2025-12-20T02:13:29.952867Z","end":"2025-12-20T02:13:30.222270Z","steps":["trace[252289306] 'process raft request' (duration: 269.235053ms)"],"step_count":1}
==> etcd [6955eb7dbb7a] <==
{"level":"warn","ts":"2025-12-20T02:11:19.847959Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60060","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-20T02:11:19.949918Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60078","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-12-20T02:12:07.244895Z","caller":"traceutil/trace.go:172","msg":"trace[798266364] linearizableReadLoop","detail":"{readStateIndex:511; appliedIndex:511; }","duration":"213.060794ms","start":"2025-12-20T02:12:07.031794Z","end":"2025-12-20T02:12:07.244854Z","steps":["trace[798266364] 'read index received' (duration: 213.055389ms)","trace[798266364] 'applied index is now lower than readState.Index' (duration: 4.109µs)"],"step_count":2}
{"level":"info","ts":"2025-12-20T02:12:07.245037Z","caller":"traceutil/trace.go:172","msg":"trace[286472680] transaction","detail":"{read_only:false; response_revision:494; number_of_response:1; }","duration":"297.72601ms","start":"2025-12-20T02:12:06.947300Z","end":"2025-12-20T02:12:07.245026Z","steps":["trace[286472680] 'process raft request' (duration: 297.578574ms)"],"step_count":1}
{"level":"warn","ts":"2025-12-20T02:12:07.245042Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"213.193567ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-12-20T02:12:07.245100Z","caller":"traceutil/trace.go:172","msg":"trace[1257312239] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:493; }","duration":"213.303974ms","start":"2025-12-20T02:12:07.031787Z","end":"2025-12-20T02:12:07.245091Z","steps":["trace[1257312239] 'agreement among raft nodes before linearized reading' (duration: 213.173447ms)"],"step_count":1}
{"level":"warn","ts":"2025-12-20T02:12:08.425636Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"189.814071ms","expected-duration":"100ms","prefix":"","request":"header:<ID:13052446816422726242 > lease_revoke:<id:35239b39868e09cb>","response":"size:28"}
{"level":"info","ts":"2025-12-20T02:12:15.681646Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
{"level":"info","ts":"2025-12-20T02:12:15.681764Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"default-k8s-diff-port-032958","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.139:2380"],"advertise-client-urls":["https://192.168.83.139:2379"]}
{"level":"error","ts":"2025-12-20T02:12:15.681878Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-12-20T02:12:22.684233Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-12-20T02:12:22.686809Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-20T02:12:22.686860Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"911810311894b523","current-leader-member-id":"911810311894b523"}
{"level":"info","ts":"2025-12-20T02:12:22.687961Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
{"level":"info","ts":"2025-12-20T02:12:22.688006Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
{"level":"warn","ts":"2025-12-20T02:12:22.691490Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"warn","ts":"2025-12-20T02:12:22.691626Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"error","ts":"2025-12-20T02:12:22.691850Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"warn","ts":"2025-12-20T02:12:22.692143Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.83.139:2379: use of closed network connection"}
{"level":"warn","ts":"2025-12-20T02:12:22.692250Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.83.139:2379: use of closed network connection"}
{"level":"error","ts":"2025-12-20T02:12:22.692290Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.139:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-20T02:12:22.695968Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.83.139:2380"}
{"level":"error","ts":"2025-12-20T02:12:22.696039Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.139:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-20T02:12:22.696144Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.83.139:2380"}
{"level":"info","ts":"2025-12-20T02:12:22.696154Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"default-k8s-diff-port-032958","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.139:2380"],"advertise-client-urls":["https://192.168.83.139:2379"]}
==> kernel <==
02:14:25 up 1 min, 0 users, load average: 1.25, 0.47, 0.17
Linux default-k8s-diff-port-032958 6.6.95 #1 SMP PREEMPT_DYNAMIC Wed Dec 17 12:49:57 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2025.02"
==> kube-apiserver [37cee352777b] <==
W1220 02:12:24.853572 1 logging.go:55] [core] [Channel #191 SubChannel #193]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:24.853799 1 logging.go:55] [core] [Channel #227 SubChannel #229]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:24.886374 1 logging.go:55] [core] [Channel #87 SubChannel #89]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:24.979017 1 logging.go:55] [core] [Channel #9 SubChannel #11]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.000084 1 logging.go:55] [core] [Channel #207 SubChannel #209]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.002681 1 logging.go:55] [core] [Channel #251 SubChannel #253]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.055296 1 logging.go:55] [core] [Channel #219 SubChannel #221]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.076421 1 logging.go:55] [core] [Channel #75 SubChannel #77]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.117381 1 logging.go:55] [core] [Channel #115 SubChannel #117]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.121093 1 logging.go:55] [core] [Channel #183 SubChannel #185]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.174493 1 logging.go:55] [core] [Channel #139 SubChannel #141]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.192366 1 logging.go:55] [core] [Channel #147 SubChannel #149]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.290406 1 logging.go:55] [core] [Channel #262 SubChannel #263]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.339079 1 logging.go:55] [core] [Channel #103 SubChannel #105]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.360110 1 logging.go:55] [core] [Channel #131 SubChannel #133]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.370079 1 logging.go:55] [core] [Channel #111 SubChannel #113]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.387509 1 logging.go:55] [core] [Channel #199 SubChannel #201]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.392242 1 logging.go:55] [core] [Channel #39 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.418164 1 logging.go:55] [core] [Channel #91 SubChannel #93]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.463768 1 logging.go:55] [core] [Channel #119 SubChannel #121]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.491271 1 logging.go:55] [core] [Channel #231 SubChannel #233]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.551484 1 logging.go:55] [core] [Channel #4 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.672624 1 logging.go:55] [core] [Channel #171 SubChannel #173]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.708145 1 logging.go:55] [core] [Channel #203 SubChannel #205]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1220 02:12:25.738375 1 logging.go:55] [core] [Channel #211 SubChannel #213]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
==> kube-apiserver [799ae6e77e4d] <==
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1220 02:13:15.375295 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
E1220 02:13:15.375659 1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
I1220 02:13:15.376472 1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
I1220 02:13:16.429720 1 handler.go:285] Adding GroupVersion metrics.k8s.io v1beta1 to ResourceManager
I1220 02:13:17.061950 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1220 02:13:17.106313 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I1220 02:13:17.143082 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1220 02:13:17.149304 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1220 02:13:18.957068 1 controller.go:667] quota admission added evaluator for: endpoints
I1220 02:13:18.991435 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1220 02:13:19.164812 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I1220 02:13:19.310960 1 controller.go:667] quota admission added evaluator for: namespaces
I1220 02:13:19.735545 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.100.251.44"}
I1220 02:13:19.755566 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.100.131.142"}
W1220 02:14:18.888986 1 handler_proxy.go:99] no RequestInfo found in the context
E1220 02:14:18.889066 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1220 02:14:18.889082 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
W1220 02:14:18.898096 1 handler_proxy.go:99] no RequestInfo found in the context
E1220 02:14:18.898161 1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
I1220 02:14:18.898178 1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
==> kube-controller-manager [0be7d4421112] <==
I1220 02:13:18.949472 1 shared_informer.go:356] "Caches are synced" controller="PV protection"
I1220 02:13:18.949789 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I1220 02:13:18.911345 1 shared_informer.go:356] "Caches are synced" controller="GC"
I1220 02:13:18.951606 1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
I1220 02:13:18.954539 1 shared_informer.go:356] "Caches are synced" controller="job"
I1220 02:13:18.928851 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1220 02:13:18.929424 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I1220 02:13:18.967844 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1220 02:13:18.972782 1 shared_informer.go:356] "Caches are synced" controller="stateful set"
I1220 02:13:18.972915 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1220 02:13:18.972963 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1220 02:13:18.972974 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1220 02:13:18.978649 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1220 02:13:19.003577 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
E1220 02:13:19.507058 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.557712 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.579564 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.584826 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.605680 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.607173 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.612896 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1220 02:13:19.619443 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
I1220 02:13:28.942844 1 node_lifecycle_controller.go:1044] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
E1220 02:14:18.972742 1 resource_quota_controller.go:446] "Unhandled Error" err="unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: stale GroupVersion discovery: metrics.k8s.io/v1beta1" logger="UnhandledError"
I1220 02:14:19.019968 1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
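Note on the replica_set.go errors above: the ReplicaSet controller is retrying pod creation while the kubernetes-dashboard ServiceAccount does not yet exist, and the syncs succeed once the addon's RBAC objects are applied. As a hedged illustration (not minikube's code), a bootstrap step could wait for that ServiceAccount with client-go before creating the dashboard Deployments; the kubeconfig location and the poll budget below are assumptions.

// waitsa.go: sketch that polls until the kubernetes-dashboard ServiceAccount exists.
package main

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed environment: default kubeconfig at ~/.kube/config.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Retry every second for up to two minutes until the ServiceAccount is present.
	err = wait.PollUntilContextTimeout(context.Background(), time.Second, 2*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			_, err := cs.CoreV1().ServiceAccounts("kubernetes-dashboard").
				Get(ctx, "kubernetes-dashboard", metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				return false, nil // not created yet; keep polling
			}
			return err == nil, err
		})
	if err != nil {
		panic(err)
	}
	fmt.Println("serviceaccount kubernetes-dashboard/kubernetes-dashboard is ready")
}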
==> kube-controller-manager [bc3e91d6c19d] <==
I1220 02:11:27.795191 1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
I1220 02:11:27.795197 1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
I1220 02:11:27.795206 1 shared_informer.go:356] "Caches are synced" controller="endpoint"
I1220 02:11:27.795417 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I1220 02:11:27.804009 1 shared_informer.go:356] "Caches are synced" controller="service-cidr-controller"
I1220 02:11:27.809262 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="default-k8s-diff-port-032958" podCIDRs=["10.244.0.0/24"]
I1220 02:11:27.814727 1 shared_informer.go:356] "Caches are synced" controller="PV protection"
I1220 02:11:27.816005 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1220 02:11:27.818197 1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
I1220 02:11:27.827800 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1220 02:11:27.835424 1 shared_informer.go:356] "Caches are synced" controller="taint"
I1220 02:11:27.835598 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I1220 02:11:27.835777 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="default-k8s-diff-port-032958"
I1220 02:11:27.835796 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-serving"
I1220 02:11:27.835833 1 node_lifecycle_controller.go:1025] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller"
I1220 02:11:27.835932 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1220 02:11:27.835939 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1220 02:11:27.835945 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1220 02:11:27.838763 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
I1220 02:11:27.838905 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kubelet-client"
I1220 02:11:27.838920 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I1220 02:11:27.843443 1 shared_informer.go:356] "Caches are synced" controller="persistent volume"
I1220 02:11:27.845334 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I1220 02:11:27.853111 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1220 02:11:32.836931 1 node_lifecycle_controller.go:1044] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
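The node-lifecycle-controller messages above key off each Node's Ready condition: the controller enters master disruption mode while no Node reports Ready and exits it once one does. A minimal client-go sketch (an illustration, not controller code; default kubeconfig assumed) that prints that condition per node:

// readycheck.go: print each node's Ready condition, the signal referenced above.
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	nodes, err := cs.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		for _, cond := range n.Status.Conditions {
			if cond.Type == corev1.NodeReady {
				// Status is True/False/Unknown; Reason e.g. KubeletReady.
				fmt.Printf("%s Ready=%s (%s)\n", n.Name, cond.Status, cond.Reason)
			}
		}
	}
}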
==> kube-proxy [696c72bae65f] <==
I1220 02:11:30.533772 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1220 02:11:30.634441 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1220 02:11:30.634675 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.139"]
E1220 02:11:30.635231 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1220 02:11:30.763679 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1220 02:11:30.764391 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1220 02:11:30.764588 1 server_linux.go:132] "Using iptables Proxier"
I1220 02:11:30.801765 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1220 02:11:30.802104 1 server.go:527] "Version info" version="v1.34.3"
I1220 02:11:30.802116 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1220 02:11:30.821963 1 config.go:309] "Starting node config controller"
I1220 02:11:30.822050 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1220 02:11:30.822061 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1220 02:11:30.826798 1 config.go:200] "Starting service config controller"
I1220 02:11:30.826954 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1220 02:11:30.829847 1 config.go:106] "Starting endpoint slice config controller"
I1220 02:11:30.830754 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1220 02:11:30.831058 1 config.go:403] "Starting serviceCIDR config controller"
I1220 02:11:30.831070 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1220 02:11:30.937234 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1220 02:11:30.937307 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1220 02:11:30.933586 1 shared_informer.go:356] "Caches are synced" controller="service config"
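The multi-line "No iptables support for family" block above records kube-proxy probing the IPv6 nat table, getting exit status 3 from ip6tables, and falling back to single-stack IPv4 (the same sequence repeats in the second kube-proxy instance below). Roughly the same probe can be reproduced from Go; the sketch below assumes a legacy ip6tables binary on PATH and root privileges, and is not kube-proxy's actual implementation.

// probenat.go: check whether the IPv6 nat table exists, as the log above describes.
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Equivalent to: ip6tables -t nat -L POSTROUTING
	out, err := exec.Command("ip6tables", "-t", "nat", "-L", "POSTROUTING").CombinedOutput()
	if err != nil {
		// Missing nat table (exit status 3 above) means no usable IPv6 support.
		fmt.Printf("no IPv6 nat support, would run single-stack IPv4: %v\n%s", err, out)
		return
	}
	fmt.Println("IPv6 nat table present; dual-stack is possible")
}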
==> kube-proxy [8a1598184096] <==
I1220 02:13:16.000300 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1220 02:13:16.100699 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1220 02:13:16.100734 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.139"]
E1220 02:13:16.100793 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1220 02:13:16.145133 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1220 02:13:16.145459 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1220 02:13:16.145683 1 server_linux.go:132] "Using iptables Proxier"
I1220 02:13:16.156575 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1220 02:13:16.157810 1 server.go:527] "Version info" version="v1.34.3"
I1220 02:13:16.158021 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1220 02:13:16.162964 1 config.go:200] "Starting service config controller"
I1220 02:13:16.162999 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1220 02:13:16.163014 1 config.go:106] "Starting endpoint slice config controller"
I1220 02:13:16.163018 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1220 02:13:16.163027 1 config.go:403] "Starting serviceCIDR config controller"
I1220 02:13:16.163030 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1220 02:13:16.166161 1 config.go:309] "Starting node config controller"
I1220 02:13:16.166330 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1220 02:13:16.166459 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1220 02:13:16.263219 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1220 02:13:16.263310 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1220 02:13:16.263327 1 shared_informer.go:356] "Caches are synced" controller="service config"
==> kube-scheduler [2808d78b661f] <==
I1220 02:13:12.086051 1 serving.go:386] Generated self-signed cert in-memory
I1220 02:13:14.442280 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.3"
I1220 02:13:14.442329 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1220 02:13:14.455168 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController
I1220 02:13:14.455457 1 shared_informer.go:349] "Waiting for caches to sync" controller="RequestHeaderAuthRequestController"
I1220 02:13:14.455686 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1220 02:13:14.455746 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1220 02:13:14.455761 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file"
I1220 02:13:14.455884 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file"
I1220 02:13:14.456412 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1220 02:13:14.456890 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1220 02:13:14.556446 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file"
I1220 02:13:14.556930 1 shared_informer.go:356] "Caches are synced" controller="RequestHeaderAuthRequestController"
I1220 02:13:14.557255 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kube-scheduler [44fb178dfab7] <==
E1220 02:11:20.961299 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E1220 02:11:20.964924 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1220 02:11:20.965293 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E1220 02:11:20.965512 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E1220 02:11:21.832650 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E1220 02:11:21.832947 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E1220 02:11:21.847314 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1220 02:11:21.848248 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1220 02:11:21.883915 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E1220 02:11:21.922925 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E1220 02:11:21.948354 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1220 02:11:21.988956 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1220 02:11:22.072320 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E1220 02:11:22.112122 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1220 02:11:22.125974 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1220 02:11:22.146170 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1220 02:11:22.171595 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1220 02:11:22.226735 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
I1220 02:11:25.144517 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1220 02:12:15.706842 1 secure_serving.go:259] Stopped listening on 127.0.0.1:10259
I1220 02:12:15.706898 1 server.go:263] "[graceful-termination] secure server has stopped listening"
I1220 02:12:15.706917 1 tlsconfig.go:258] "Shutting down DynamicServingCertificateController"
I1220 02:12:15.706972 1 configmap_cafile_content.go:226] "Shutting down controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1220 02:12:15.707164 1 server.go:265] "[graceful-termination] secure server is exiting"
E1220 02:12:15.707186 1 run.go:72] "command failed" err="finished without leader elect"
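The "Failed to watch ... is forbidden" lines above are startup noise from the previous scheduler: its informers begin listing before the system:kube-scheduler RBAC bindings are reconciled, and the closing "finished without leader elect" is its graceful exit when the old control plane is shut down. Whether a given user may perform a verb can be asked of the apiserver with a SubjectAccessReview; the sketch below (admin credentials and the default kubeconfig are assumptions) checks the exact access the first reflector error was denied.

// cani.go: ask the apiserver whether system:kube-scheduler may list csinodes cluster-wide.
package main

import (
	"context"
	"fmt"

	authzv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	sar := &authzv1.SubjectAccessReview{
		Spec: authzv1.SubjectAccessReviewSpec{
			User: "system:kube-scheduler",
			ResourceAttributes: &authzv1.ResourceAttributes{
				Verb:     "list",
				Group:    "storage.k8s.io",
				Resource: "csinodes",
			},
		},
	}
	res, err := cs.AuthorizationV1().SubjectAccessReviews().
		Create(context.Background(), sar, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("allowed=%v denied=%v reason=%q\n", res.Status.Allowed, res.Status.Denied, res.Status.Reason)
}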
==> kubelet <==
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.306089 4206 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="959487a2071a7d265b217d3aee2b7e4fbafb02bb0585f7ff40beae30aa17b725"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.330357 4206 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ae4fd44c29005031aebaf78608172fd0e41f69bee4dd72c3ea114e035fc7e8e"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.330548 4206 kubelet.go:3220] "Creating a mirror pod for static pod" pod="kube-system/etcd-default-k8s-diff-port-032958"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:20.342746 4206 kubelet.go:3222] "Failed creating a mirror pod" err="pods \"etcd-default-k8s-diff-port-032958\" already exists" pod="kube-system/etcd-default-k8s-diff-port-032958"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.617352 4206 apiserver.go:52] "Watching apiserver"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.683834 4206 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.742502 4206 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/07a41d99-89a6-4d25-b7cf-57f49fbdea5a-lib-modules\") pod \"kube-proxy-22tlj\" (UID: \"07a41d99-89a6-4d25-b7cf-57f49fbdea5a\") " pod="kube-system/kube-proxy-22tlj"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.743177 4206 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/07a41d99-89a6-4d25-b7cf-57f49fbdea5a-xtables-lock\") pod \"kube-proxy-22tlj\" (UID: \"07a41d99-89a6-4d25-b7cf-57f49fbdea5a\") " pod="kube-system/kube-proxy-22tlj"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.743223 4206 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/a74ca514-b136-40a6-9fd7-27c96e23bca7-tmp\") pod \"storage-provisioner\" (UID: \"a74ca514-b136-40a6-9fd7-27c96e23bca7\") " pod="kube-system/storage-provisioner"
Dec 20 02:14:20 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:20.941330 4206 scope.go:117] "RemoveContainer" containerID="e389ed009c414813f08a16331049a1f7b81ae99102e1d3eee00456652f70d78e"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.187714 4206 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.188551 4206 kuberuntime_image.go:43] "Failed to pull image" err="Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.190144 4206 kuberuntime_manager.go:1449] "Unhandled Error" err="container dashboard-metrics-scraper start failed in pod dashboard-metrics-scraper-6ffb444bf9-wzcc7_kubernetes-dashboard(6951d269-7815-46e0-bfd0-c9dba02d7a47): ErrImagePull: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" logger="UnhandledError"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.191545 4206 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ErrImagePull: \"Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9-wzcc7" podUID="6951d269-7815-46e0-bfd0-c9dba02d7a47"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.218131 4206 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.218192 4206 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.218346 4206 kuberuntime_manager.go:1449] "Unhandled Error" err="container metrics-server start failed in pod metrics-server-746fcd58dc-r9hzl_kube-system(ea98af6d-2555-48e1-9403-91cdbace7b1c): ErrImagePull: Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" logger="UnhandledError"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.219866 4206 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"metrics-server\" with ErrImagePull: \"Error response from daemon: Get \\\"https://fake.domain/v2/\\\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host\"" pod="kube-system/metrics-server-746fcd58dc-r9hzl" podUID="ea98af6d-2555-48e1-9403-91cdbace7b1c"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:21.415555 4206 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f14a7d35a9c218a36064019d8d70cd5e2dc10c8fff7e745b9c07943ea6e37833"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:21.445678 4206 kubelet.go:3220] "Creating a mirror pod for static pod" pod="kube-system/etcd-default-k8s-diff-port-032958"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:21.445968 4206 kubelet.go:3220] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-default-k8s-diff-port-032958"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: I1220 02:14:21.446323 4206 kubelet.go:3220] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-default-k8s-diff-port-032958"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.476713 4206 kubelet.go:3222] "Failed creating a mirror pod" err="pods \"kube-scheduler-default-k8s-diff-port-032958\" already exists" pod="kube-system/kube-scheduler-default-k8s-diff-port-032958"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.478173 4206 kubelet.go:3222] "Failed creating a mirror pod" err="pods \"etcd-default-k8s-diff-port-032958\" already exists" pod="kube-system/etcd-default-k8s-diff-port-032958"
Dec 20 02:14:21 default-k8s-diff-port-032958 kubelet[4206]: E1220 02:14:21.479470 4206 kubelet.go:3222] "Failed creating a mirror pod" err="pods \"kube-apiserver-default-k8s-diff-port-032958\" already exists" pod="kube-system/kube-apiserver-default-k8s-diff-port-032958"
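Both pull failures above appear intentional in this test profile: metrics-server points at the unresolvable fake.domain registry, and dashboard-metrics-scraper uses registry.k8s.io/echoserver:1.4, whose Docker schema 1 manifest current Docker daemons refuse. On the API side these surface as container waiting reasons (ErrImagePull, then ImagePullBackOff); a hedged client-go sketch that lists them follows (the two namespaces come from the log, everything else is an assumption).

// pullerrs.go: list containers stuck in a waiting state, e.g. ErrImagePull.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// The two namespaces whose pods fail to pull in the kubelet log above.
	for _, ns := range []string{"kube-system", "kubernetes-dashboard"} {
		pods, err := cs.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{})
		if err != nil {
			panic(err)
		}
		for _, p := range pods.Items {
			for _, st := range p.Status.ContainerStatuses {
				if w := st.State.Waiting; w != nil {
					// Reason is e.g. ErrImagePull or ImagePullBackOff; Message carries the daemon error.
					fmt.Printf("%s/%s %s: %s - %s\n", ns, p.Name, st.Name, w.Reason, w.Message)
				}
			}
		}
	}
}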
==> kubernetes-dashboard [3d0dc5e4eaf5] <==
2025/12/20 02:13:33 Starting overwatch
2025/12/20 02:13:33 Using namespace: kubernetes-dashboard
2025/12/20 02:13:33 Using in-cluster config to connect to apiserver
2025/12/20 02:13:33 Using secret token for csrf signing
2025/12/20 02:13:33 Initializing csrf token from kubernetes-dashboard-csrf secret
2025/12/20 02:13:33 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
2025/12/20 02:13:33 Successful initial request to the apiserver, version: v1.34.3
2025/12/20 02:13:33 Generating JWE encryption key
2025/12/20 02:13:33 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
2025/12/20 02:13:33 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
2025/12/20 02:13:33 Initializing JWE encryption key from synchronized object
2025/12/20 02:13:33 Creating in-cluster Sidecar client
2025/12/20 02:13:33 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2025/12/20 02:13:33 Serving insecurely on HTTP port: 9090
2025/12/20 02:14:19 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
==> storage-provisioner [db82439a8277] <==
I1220 02:14:21.324067 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1220 02:14:21.372255 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1220 02:14:21.373462 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
W1220 02:14:21.382159 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1220 02:14:24.842229 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
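The warnings above are the apiserver flagging the provisioner's k8s.io-minikube-hostpath lock, which still uses a v1 Endpoints object, as deprecated in favour of Lease-based locking. A minimal sketch of the Lease equivalent with client-go's leaderelection package (the identity string and timings are assumptions, and this is not the provisioner's actual code):

// leaselock.go: Lease-based leader election for the lock named in the log above.
package main

import (
	"context"
	"log"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	id, _ := os.Hostname() // assumed identity; any unique string works

	// Lease lock replacing the deprecated Endpoints-based lock.
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "k8s.io-minikube-hostpath", Namespace: "kube-system"},
		Client:     cs.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}
	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { log.Println("became leader, starting provisioning") },
			OnStoppedLeading: func() { log.Println("lost leadership, stopping") },
		},
	})
}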
==> storage-provisioner [e389ed009c41] <==
I1220 02:13:15.862545 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F1220 02:13:45.872285 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
-- /stdout --
helpers_test.go:263: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-032958 -n default-k8s-diff-port-032958
helpers_test.go:270: (dbg) Run: kubectl --context default-k8s-diff-port-032958 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:281: non-running pods: metrics-server-746fcd58dc-r9hzl dashboard-metrics-scraper-6ffb444bf9-wzcc7
helpers_test.go:283: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: describe non-running pods <======
helpers_test.go:286: (dbg) Run: kubectl --context default-k8s-diff-port-032958 describe pod metrics-server-746fcd58dc-r9hzl dashboard-metrics-scraper-6ffb444bf9-wzcc7
helpers_test.go:286: (dbg) Non-zero exit: kubectl --context default-k8s-diff-port-032958 describe pod metrics-server-746fcd58dc-r9hzl dashboard-metrics-scraper-6ffb444bf9-wzcc7: exit status 1 (85.163393ms)
** stderr **
Error from server (NotFound): pods "metrics-server-746fcd58dc-r9hzl" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-6ffb444bf9-wzcc7" not found
** /stderr **
helpers_test.go:288: kubectl --context default-k8s-diff-port-032958 describe pod metrics-server-746fcd58dc-r9hzl dashboard-metrics-scraper-6ffb444bf9-wzcc7: exit status 1
--- FAIL: TestStartStop/group/default-k8s-diff-port/serial/Pause (41.63s)
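For a pause that is expected to settle, the status probe run at helpers_test.go:263 can be wrapped in a simple retry loop outside the suite; the binary path and profile name below come from the log, while treating "Paused" as the target apiserver value is an assumption about minikube's status strings.

// pausepoll.go: retry the same minikube status call until the apiserver reports Paused.
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

func main() {
	args := []string{
		"status", "--format={{.APIServer}}",
		"-p", "default-k8s-diff-port-032958",
		"-n", "default-k8s-diff-port-032958",
	}
	for i := 0; i < 10; i++ {
		// Non-zero exit is expected while the component is paused or stopped,
		// so the error is ignored and only stdout is inspected.
		out, _ := exec.Command("out/minikube-linux-amd64", args...).Output()
		status := strings.TrimSpace(string(out))
		fmt.Printf("attempt %d: apiserver=%q\n", i+1, status)
		if status == "Paused" {
			return
		}
		time.Sleep(5 * time.Second)
	}
	fmt.Println("apiserver never reported Paused")
}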