=== RUN TestStartStop/group/no-preload/serial/Pause
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 pause -p no-preload-480987 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Done: out/minikube-linux-amd64 pause -p no-preload-480987 --alsologtostderr -v=1: (1.763339548s)
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-480987 -n no-preload-480987
E1213 14:15:41.491103 20230 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22122-16298/.minikube/profiles/auto-378767/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1213 14:15:42.535885 20230 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22122-16298/.minikube/profiles/false-378767/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1213 14:15:45.848569 20230 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22122-16298/.minikube/profiles/functional-427989/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-480987 -n no-preload-480987: exit status 2 (15.950136938s)
-- stdout --
Stopped
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: post-pause apiserver status = "Stopped"; want = "Paused"
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-480987 -n no-preload-480987
E1213 14:15:51.086360 20230 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22122-16298/.minikube/profiles/enable-default-cni-378767/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1213 14:15:53.648077 20230 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22122-16298/.minikube/profiles/enable-default-cni-378767/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1213 14:15:55.594591 20230 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22122-16298/.minikube/profiles/custom-flannel-378767/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1213 14:15:58.769666 20230 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22122-16298/.minikube/profiles/enable-default-cni-378767/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-480987 -n no-preload-480987: exit status 2 (15.782450512s)
-- stdout --
Stopped
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 unpause -p no-preload-480987 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Done: out/minikube-linux-amd64 unpause -p no-preload-480987 --alsologtostderr -v=1: (1.000034805s)
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-480987 -n no-preload-480987
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-480987 -n no-preload-480987
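Editor's note: the pause check above fails because the profile's apiserver reports "Stopped" where the test expects "Paused". The same pause / status / unpause sequence can be replayed by hand outside the test harness; below is a minimal Go sketch (standard library only) built from the exact commands shown in the log. The profile name and flags come from the log; everything else is illustrative and is not the test's own helper code.

// repro.go: replay the pause/status/unpause sequence the test drives.
// Minimal sketch: assumes `minikube` is on PATH and the profile below exists.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

const profile = "no-preload-480987" // profile name taken from the log above

// run executes a minikube subcommand and returns its trimmed combined output.
// `minikube status` exits non-zero when a component is not Running, so a
// non-zero exit is reported but not treated as fatal (the test logs the same
// "may be ok" caveat above).
func run(args ...string) string {
	out, err := exec.Command("minikube", args...).CombinedOutput()
	if err != nil {
		fmt.Printf("minikube %s: %v (may be ok for status)\n", strings.Join(args, " "), err)
	}
	return strings.TrimSpace(string(out))
}

func main() {
	run("pause", "-p", profile, "--alsologtostderr", "-v=1")
	api := run("status", "--format={{.APIServer}}", "-p", profile, "-n", profile)
	fmt.Printf("post-pause apiserver status = %q; the test wants %q\n", api, "Paused")

	run("unpause", "-p", profile, "--alsologtostderr", "-v=1")
	api = run("status", "--format={{.APIServer}}", "-p", profile, "-n", profile)
	fmt.Printf("post-unpause apiserver status = %q\n", api)
}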
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:224: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: network settings <======
helpers_test.go:231: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-480987 -n no-preload-480987
helpers_test.go:253: <<< TestStartStop/group/no-preload/serial/Pause FAILED: start of post-mortem logs <<<
helpers_test.go:254: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: minikube logs <======
helpers_test.go:256: (dbg) Run: out/minikube-linux-amd64 -p no-preload-480987 logs -n 25
E1213 14:16:09.011357 20230 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22122-16298/.minikube/profiles/enable-default-cni-378767/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:256: (dbg) Done: out/minikube-linux-amd64 -p no-preload-480987 logs -n 25: (1.793498316s)
helpers_test.go:261: TestStartStop/group/no-preload/serial/Pause logs:
-- stdout --
==> Audit <==
┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ guest-719825 ssh which VBoxControl │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which wget │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which socat │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which git │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which podman │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which iptables │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which docker │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which curl │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /data | grep /data │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/minikube | grep /var/lib/minikube │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/boot2docker | grep /var/lib/boot2docker │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/toolbox | grep /var/lib/toolbox │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/cni | grep /var/lib/cni │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/kubelet | grep /var/lib/kubelet │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/docker | grep /var/lib/docker │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh cat /version.json │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh test -f /sys/kernel/btf/vmlinux && echo 'OK' || echo 'NOT FOUND' │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ delete │ -p guest-719825 │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ image │ no-preload-480987 image list --format=json │ no-preload-480987 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ pause │ -p no-preload-480987 --alsologtostderr -v=1 │ no-preload-480987 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ addons │ enable metrics-server -p newest-cni-994510 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain │ newest-cni-994510 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ stop │ -p newest-cni-994510 --alsologtostderr -v=3 │ newest-cni-994510 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:16 UTC │
│ addons │ enable dashboard -p newest-cni-994510 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 │ newest-cni-994510 │ jenkins │ v1.37.0 │ 13 Dec 25 14:16 UTC │ 13 Dec 25 14:16 UTC │
│ start │ -p newest-cni-994510 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=kvm2 --kubernetes-version=v1.35.0-beta.0 │ newest-cni-994510 │ jenkins │ v1.37.0 │ 13 Dec 25 14:16 UTC │ │
│ unpause │ -p no-preload-480987 --alsologtostderr -v=1 │ no-preload-480987 │ jenkins │ v1.37.0 │ 13 Dec 25 14:16 UTC │ 13 Dec 25 14:16 UTC │
└─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/12/13 14:16:01
Running on machine: ubuntu-20-agent-3
Binary: Built with gc go1.25.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1213 14:16:01.125524 65660 out.go:360] Setting OutFile to fd 1 ...
I1213 14:16:01.125796 65660 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1213 14:16:01.125808 65660 out.go:374] Setting ErrFile to fd 2...
I1213 14:16:01.125813 65660 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1213 14:16:01.126005 65660 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22122-16298/.minikube/bin
I1213 14:16:01.126504 65660 out.go:368] Setting JSON to false
I1213 14:16:01.127470 65660 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":7112,"bootTime":1765628249,"procs":195,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1045-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1213 14:16:01.127542 65660 start.go:143] virtualization: kvm guest
I1213 14:16:01.130102 65660 out.go:179] * [newest-cni-994510] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1213 14:16:01.131794 65660 notify.go:221] Checking for updates...
I1213 14:16:01.131884 65660 out.go:179] - MINIKUBE_LOCATION=22122
I1213 14:16:01.133773 65660 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1213 14:16:01.135572 65660 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22122-16298/kubeconfig
I1213 14:16:01.137334 65660 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22122-16298/.minikube
I1213 14:16:01.138729 65660 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1213 14:16:01.140547 65660 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1213 14:16:01.142283 65660 config.go:182] Loaded profile config "newest-cni-994510": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0-beta.0
I1213 14:16:01.142955 65660 driver.go:422] Setting default libvirt URI to qemu:///system
I1213 14:16:01.181268 65660 out.go:179] * Using the kvm2 driver based on existing profile
I1213 14:16:01.182751 65660 start.go:309] selected driver: kvm2
I1213 14:16:01.182778 65660 start.go:927] validating driver "kvm2" against &{Name:newest-cni-994510 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22122/minikube-v1.37.0-1765613186-22122-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765275396-22083@sha256:ffa93f7bad1d2c0a7acfa6e97f1eec0e4955680d08c3904e49db297a10f7f89f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:newest-cni-994510 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.114 Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1213 14:16:01.182906 65660 start.go:938] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1213 14:16:01.183932 65660 start_flags.go:1011] Waiting for components: map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true]
I1213 14:16:01.183971 65660 cni.go:84] Creating CNI manager for ""
I1213 14:16:01.184040 65660 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1213 14:16:01.184077 65660 start.go:353] cluster config:
{Name:newest-cni-994510 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22122/minikube-v1.37.0-1765613186-22122-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765275396-22083@sha256:ffa93f7bad1d2c0a7acfa6e97f1eec0e4955680d08c3904e49db297a10f7f89f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:newest-cni-994510 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.114 Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1213 14:16:01.184172 65660 iso.go:125] acquiring lock: {Name:mkdb244ed0b6c01d7604ff94d6687c3511cb9170 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1213 14:16:01.186634 65660 out.go:179] * Starting "newest-cni-994510" primary control-plane node in "newest-cni-994510" cluster
I1213 14:16:01.188000 65660 preload.go:188] Checking if preload exists for k8s version v1.35.0-beta.0 and runtime docker
I1213 14:16:01.188043 65660 preload.go:203] Found local preload: /home/jenkins/minikube-integration/22122-16298/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-beta.0-docker-overlay2-amd64.tar.lz4
I1213 14:16:01.188051 65660 cache.go:65] Caching tarball of preloaded images
I1213 14:16:01.188175 65660 preload.go:238] Found /home/jenkins/minikube-integration/22122-16298/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-beta.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1213 14:16:01.188192 65660 cache.go:68] Finished verifying existence of preloaded tar for v1.35.0-beta.0 on docker
I1213 14:16:01.188372 65660 profile.go:143] Saving config to /home/jenkins/minikube-integration/22122-16298/.minikube/profiles/newest-cni-994510/config.json ...
I1213 14:16:01.188674 65660 start.go:360] acquireMachinesLock for newest-cni-994510: {Name:mkb4e7ea4da4358e2127ad51f1ac2815f0b79c60 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1213 14:16:01.188729 65660 start.go:364] duration metric: took 30.792µs to acquireMachinesLock for "newest-cni-994510"
I1213 14:16:01.188745 65660 start.go:96] Skipping create...Using existing machine configuration
I1213 14:16:01.188750 65660 fix.go:54] fixHost starting:
I1213 14:16:01.191035 65660 fix.go:112] recreateIfNeeded on newest-cni-994510: state=Stopped err=<nil>
W1213 14:16:01.191077 65660 fix.go:138] unexpected machine state, will restart: <nil>
I1213 14:15:59.453385 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1213 14:15:59.453464 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:01.193182 65660 out.go:252] * Restarting existing kvm2 VM for "newest-cni-994510" ...
I1213 14:16:01.193250 65660 main.go:143] libmachine: starting domain...
I1213 14:16:01.193262 65660 main.go:143] libmachine: ensuring networks are active...
I1213 14:16:01.194575 65660 main.go:143] libmachine: Ensuring network default is active
I1213 14:16:01.195131 65660 main.go:143] libmachine: Ensuring network mk-newest-cni-994510 is active
I1213 14:16:01.195757 65660 main.go:143] libmachine: getting domain XML...
I1213 14:16:01.197197 65660 main.go:143] libmachine: starting domain XML:
<domain type='kvm'>
  <name>newest-cni-994510</name>
  <uuid>30fbdf00-43d2-4fb6-8630-f0db2bc365e5</uuid>
  <memory unit='KiB'>3145728</memory>
  <currentMemory unit='KiB'>3145728</currentMemory>
  <vcpu placement='static'>2</vcpu>
  <os>
    <type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
    <boot dev='cdrom'/>
    <boot dev='hd'/>
    <bootmenu enable='no'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <cpu mode='host-passthrough' check='none' migratable='on'/>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/bin/qemu-system-x86_64</emulator>
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='/home/jenkins/minikube-integration/22122-16298/.minikube/machines/newest-cni-994510/boot2docker.iso'/>
      <target dev='hdc' bus='scsi'/>
      <readonly/>
      <address type='drive' controller='0' bus='0' target='0' unit='2'/>
    </disk>
    <disk type='file' device='disk'>
      <driver name='qemu' type='raw' io='threads'/>
      <source file='/home/jenkins/minikube-integration/22122-16298/.minikube/machines/newest-cni-994510/newest-cni-994510.rawdisk'/>
      <target dev='hda' bus='virtio'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
    </disk>
    <controller type='usb' index='0' model='piix3-uhci'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
    </controller>
    <controller type='pci' index='0' model='pci-root'/>
    <controller type='scsi' index='0' model='lsilogic'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </controller>
    <interface type='network'>
      <mac address='52:54:00:44:4a:b4'/>
      <source network='mk-newest-cni-994510'/>
      <model type='virtio'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
    </interface>
    <interface type='network'>
      <mac address='52:54:00:5a:df:a1'/>
      <source network='default'/>
      <model type='virtio'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
    </interface>
    <serial type='pty'>
      <target type='isa-serial' port='0'>
        <model name='isa-serial'/>
      </target>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
    <input type='mouse' bus='ps2'/>
    <input type='keyboard' bus='ps2'/>
    <audio id='1' type='none'/>
    <memballoon model='virtio'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
    </memballoon>
    <rng model='virtio'>
      <backend model='random'>/dev/random</backend>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
    </rng>
  </devices>
</domain>
I1213 14:16:02.595304 65660 main.go:143] libmachine: waiting for domain to start...
I1213 14:16:02.596894 65660 main.go:143] libmachine: domain is now running
I1213 14:16:02.596945 65660 main.go:143] libmachine: waiting for IP...
I1213 14:16:02.597844 65660 main.go:143] libmachine: domain newest-cni-994510 has defined MAC address 52:54:00:44:4a:b4 in network mk-newest-cni-994510
I1213 14:16:02.598831 65660 main.go:143] libmachine: domain newest-cni-994510 has current primary IP address 192.168.72.114 and MAC address 52:54:00:44:4a:b4 in network mk-newest-cni-994510
I1213 14:16:02.598852 65660 main.go:143] libmachine: found domain IP: 192.168.72.114
I1213 14:16:02.598859 65660 main.go:143] libmachine: reserving static IP address...
I1213 14:16:02.599517 65660 main.go:143] libmachine: found host DHCP lease matching {name: "newest-cni-994510", mac: "52:54:00:44:4a:b4", ip: "192.168.72.114"} in network mk-newest-cni-994510: {Iface:virbr4 ExpiryTime:2025-12-13 15:15:09 +0000 UTC Type:0 Mac:52:54:00:44:4a:b4 Iaid: IPaddr:192.168.72.114 Prefix:24 Hostname:newest-cni-994510 Clientid:01:52:54:00:44:4a:b4}
I1213 14:16:02.599551 65660 main.go:143] libmachine: skip adding static IP to network mk-newest-cni-994510 - found existing host DHCP lease matching {name: "newest-cni-994510", mac: "52:54:00:44:4a:b4", ip: "192.168.72.114"}
I1213 14:16:02.599560 65660 main.go:143] libmachine: reserved static IP address 192.168.72.114 for domain newest-cni-994510
I1213 14:16:02.599566 65660 main.go:143] libmachine: waiting for SSH...
I1213 14:16:02.599571 65660 main.go:143] libmachine: Getting to WaitForSSH function...
I1213 14:16:02.602167 65660 main.go:143] libmachine: domain newest-cni-994510 has defined MAC address 52:54:00:44:4a:b4 in network mk-newest-cni-994510
I1213 14:16:02.602671 65660 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:44:4a:b4", ip: ""} in network mk-newest-cni-994510: {Iface:virbr4 ExpiryTime:2025-12-13 15:15:09 +0000 UTC Type:0 Mac:52:54:00:44:4a:b4 Iaid: IPaddr:192.168.72.114 Prefix:24 Hostname:newest-cni-994510 Clientid:01:52:54:00:44:4a:b4}
I1213 14:16:02.602700 65660 main.go:143] libmachine: domain newest-cni-994510 has defined IP address 192.168.72.114 and MAC address 52:54:00:44:4a:b4 in network mk-newest-cni-994510
I1213 14:16:02.602916 65660 main.go:143] libmachine: Using SSH client type: native
I1213 14:16:02.603157 65660 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e1a0] 0x850e40 <nil> [] 0s} 192.168.72.114 22 <nil> <nil>}
I1213 14:16:02.603168 65660 main.go:143] libmachine: About to run SSH command:
exit 0
I1213 14:16:05.663680 65660 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.72.114:22: connect: no route to host
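Editor's note: the dial error above is part of the wait-for-SSH phase; after restarting the VM, libmachine keeps retrying TCP connections to port 22, and "no route to host" is expected while the guest's network comes up. A rough Go sketch of that style of wait loop follows; the retry interval and overall timeout are assumptions chosen for illustration, not minikube's actual values.

// waitssh.go: illustrative TCP wait loop in the spirit of the "waiting for SSH"
// phase logged above. Not minikube's implementation.
package main

import (
	"fmt"
	"net"
	"time"
)

// waitForPort keeps dialing addr until it answers or the timeout expires.
func waitForPort(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
		if err == nil {
			conn.Close()
			return nil // port reachable; an SSH handshake could follow here
		}
		// e.g. "connect: no route to host" while the VM is still booting
		fmt.Println("retrying:", err)
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("timed out waiting for %s", addr)
}

func main() {
	// Address taken from the log above (192.168.72.114, SSH port 22).
	if err := waitForPort("192.168.72.114:22", 2*time.Minute); err != nil {
		fmt.Println(err)
	}
}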
I1213 14:16:04.456629 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1213 14:16:04.456680 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:05.791507 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": read tcp 192.168.61.1:54372->192.168.61.21:8444: read: connection reset by peer
I1213 14:16:05.791551 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:05.792084 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:05.944530 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:05.945425 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
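Editor's note: the api_server.go lines from PID 64658 belong to a different profile's start and are a health-check loop polling https://192.168.61.21:8444/healthz, cycling through timeouts, connection resets, and refusals while that apiserver restarts. A comparable probe can be written with the Go standard library; the insecure TLS config and 5s timeout below are assumptions for this sketch, not what minikube configures.

// healthz.go: illustrative probe of an apiserver /healthz endpoint, similar in
// spirit to the api_server.go checks in the log above.
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second, // assumed timeout for the example
		Transport: &http.Transport{
			// Assumption for the sketch: skip certificate verification.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	// Endpoint taken from the log above.
	resp, err := client.Get("https://192.168.61.21:8444/healthz")
	if err != nil {
		// While the apiserver restarts this yields the same kinds of errors
		// logged above: timeouts, connection resets, connection refused.
		fmt.Println("healthz check failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("healthz: %s (HTTP %d)\n", body, resp.StatusCode)
}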
==> Docker <==
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.200344652Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.200485128Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Dec 13 14:15:20 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:15:20Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.237676952Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.238207133Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.246967518Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.247009573Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.978311358Z" level=info msg="ignoring event" container=c522abf03bd68d5546f765f4b5f89231a556fd352bdc3bf6c742a5b152ef313f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 13 14:15:21 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:15:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/c1f03d7fa4950bf1999afa71cea62fd1bcf1d2684c789709041868d8f710fc0e/resolv.conf as [nameserver 10.96.0.10 search kube-system.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Dec 13 14:15:32 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:32.339669699Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Dec 13 14:15:32 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:32.408770252Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Dec 13 14:15:32 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:32.408895320Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Dec 13 14:15:32 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:15:32Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Dec 13 14:15:33 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:33.760728266Z" level=error msg="Handler for POST /v1.51/containers/7731d9ba696b/pause returned error: cannot pause container 7731d9ba696bc48dd0037f538a0957012f30009a9e05e971c946977be10ff36b: OCI runtime pause failed: container not running"
Dec 13 14:15:33 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:33.837874481Z" level=info msg="ignoring event" container=7731d9ba696bc48dd0037f538a0957012f30009a9e05e971c946977be10ff36b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 13 14:16:08 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:16:08Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Dec 13 14:16:08 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:16:08Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-9278n_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"df1ae620e7830da08877464b409a1a379127a6f2a24e16d49affeaf5da36304b\""
Dec 13 14:16:08 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:08.908764997Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:16:08 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:08.908814325Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:16:08 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:08.920308371Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Dec 13 14:16:08 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:08.920350681Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:16:09 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:09.044310286Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Dec 13 14:16:09 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:09.143181834Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Dec 13 14:16:09 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:09.143362360Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Dec 13 14:16:09 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:16:09Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
cc149c15604ed 6e38f40d628db 1 second ago Running storage-provisioner 2 7fe73cfac55b5 storage-provisioner kube-system
c87ce8eecf3dc kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93 50 seconds ago Running kubernetes-dashboard 0 e4c80e4356825 kubernetes-dashboard-b84665fb8-qgkp8 kubernetes-dashboard
12db3d62fa358 56cc512116c8f 58 seconds ago Running busybox 1 c4d19dba95faf busybox default
df6bc06c07314 aa5e3ebc0dfed 59 seconds ago Running coredns 1 42e2df8bc0c2a coredns-7d764666f9-vqfqb kube-system
d56ac35f2023e 8a4ded35a3eb1 About a minute ago Running kube-proxy 1 4df6888cada75 kube-proxy-bcqzf kube-system
7731d9ba696bc 6e38f40d628db About a minute ago Exited storage-provisioner 1 7fe73cfac55b5 storage-provisioner kube-system
bb9406d173c82 7bb6219ddab95 About a minute ago Running kube-scheduler 1 598ae50e4090f kube-scheduler-no-preload-480987 kube-system
abc673268b8c4 a3e246e9556e9 About a minute ago Running etcd 1 f25680d6231bd etcd-no-preload-480987 kube-system
f15386049dc5d 45f3cc72d235f About a minute ago Running kube-controller-manager 1 7c3c0ac1e767d kube-controller-manager-no-preload-480987 kube-system
c04badbd06c59 aa9d02839d8de About a minute ago Running kube-apiserver 1 894e50d9bbd2f kube-apiserver-no-preload-480987 kube-system
a753bda60e00b gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 2 minutes ago Exited busybox 0 3efacce8eff61 busybox default
a83817d1e3a19 aa5e3ebc0dfed 2 minutes ago Exited coredns 0 bbeedeba027f5 coredns-7d764666f9-vqfqb kube-system
825b5a74aef54 8a4ded35a3eb1 2 minutes ago Exited kube-proxy 0 58393cab0a018 kube-proxy-bcqzf kube-system
dbcd28d379e9d 7bb6219ddab95 2 minutes ago Exited kube-scheduler 0 3aeb2c8b83364 kube-scheduler-no-preload-480987 kube-system
421c3cd800264 a3e246e9556e9 2 minutes ago Exited etcd 0 f584e9b37f307 etcd-no-preload-480987 kube-system
0a4ff8bbd246b 45f3cc72d235f 2 minutes ago Exited kube-controller-manager 0 3a909272bcfee kube-controller-manager-no-preload-480987 kube-system
15efb3b314731 aa9d02839d8de 2 minutes ago Exited kube-apiserver 0 6cd8631e870c0 kube-apiserver-no-preload-480987 kube-system
==> coredns [a83817d1e3a1] <==
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/ready: Plugins not ready: "kubernetes"
[INFO] plugin/ready: Plugins not ready: "kubernetes"
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b
CoreDNS-1.13.1
linux/amd64, go1.25.2, 1db4568
[INFO] plugin/ready: Plugins not ready: "kubernetes"
[INFO] plugin/ready: Plugins not ready: "kubernetes"
[ERROR] plugin/kubernetes: Failed to watch
[ERROR] plugin/kubernetes: Failed to watch
[ERROR] plugin/kubernetes: Failed to watch
[INFO] Reloading
[INFO] plugin/reload: Running configuration SHA512 = ecad3ac8c72227dcf0d7a418ea5051ee155dd74d241a13c4787cc61906568517b5647c8519c78ef2c6b724422ee4b03d6cfb27e9a87140163726e83184faf782
[INFO] Reloading complete
[INFO] 127.0.0.1:48197 - 36083 "HINFO IN 948520708112921410.8802066444027197549. udp 56 false 512" NXDOMAIN qr,rd,ra 131 0.08414206s
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [df6bc06c0731] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = ecad3ac8c72227dcf0d7a418ea5051ee155dd74d241a13c4787cc61906568517b5647c8519c78ef2c6b724422ee4b03d6cfb27e9a87140163726e83184faf782
CoreDNS-1.13.1
linux/amd64, go1.25.2, 1db4568
[INFO] 127.0.0.1:41339 - 43934 "HINFO IN 5178304912045032897.7220680391157509907. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.10370268s
==> describe nodes <==
Name: no-preload-480987
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=no-preload-480987
kubernetes.io/os=linux
minikube.k8s.io/commit=142a8bd7cb3f031b5f72a3965bb211dc77d9e1a7
minikube.k8s.io/name=no-preload-480987
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_12_13T14_13_20_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 13 Dec 2025 14:13:16 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: no-preload-480987
AcquireTime: <unset>
RenewTime: Sat, 13 Dec 2025 14:16:07 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sat, 13 Dec 2025 14:16:08 +0000 Sat, 13 Dec 2025 14:13:14 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sat, 13 Dec 2025 14:16:08 +0000 Sat, 13 Dec 2025 14:13:14 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sat, 13 Dec 2025 14:16:08 +0000 Sat, 13 Dec 2025 14:13:14 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sat, 13 Dec 2025 14:16:08 +0000 Sat, 13 Dec 2025 14:15:08 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.83.249
Hostname: no-preload-480987
Capacity:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
System Info:
Machine ID: a518b2b6861e4d398d1398567a956c88
System UUID: a518b2b6-861e-4d39-8d13-98567a956c88
Boot ID: f2072675-ae25-45ab-b1ff-1c552f111941
Kernel Version: 6.6.95
OS Image: Buildroot 2025.02
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.5.2
Kubelet Version: v1.35.0-beta.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m6s
kube-system coredns-7d764666f9-vqfqb 100m (5%) 0 (0%) 70Mi (2%) 170Mi (5%) 2m44s
kube-system etcd-no-preload-480987 100m (5%) 0 (0%) 100Mi (3%) 0 (0%) 2m49s
kube-system kube-apiserver-no-preload-480987 250m (12%) 0 (0%) 0 (0%) 0 (0%) 2m49s
kube-system kube-controller-manager-no-preload-480987 200m (10%) 0 (0%) 0 (0%) 0 (0%) 2m50s
kube-system kube-proxy-bcqzf 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m44s
kube-system kube-scheduler-no-preload-480987 100m (5%) 0 (0%) 0 (0%) 0 (0%) 2m49s
kube-system metrics-server-5d785b57d4-5xl42 100m (5%) 0 (0%) 200Mi (6%) 0 (0%) 116s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m41s
kubernetes-dashboard dashboard-metrics-scraper-867fb5f87b-nkc9p 0 (0%) 0 (0%) 0 (0%) 0 (0%) 61s
kubernetes-dashboard kubernetes-dashboard-b84665fb8-qgkp8 0 (0%) 0 (0%) 0 (0%) 0 (0%) 61s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 0 (0%)
memory 370Mi (12%) 170Mi (5%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal RegisteredNode 2m45s node-controller Node no-preload-480987 event: Registered Node no-preload-480987 in Controller
Normal RegisteredNode 64s node-controller Node no-preload-480987 event: Registered Node no-preload-480987 in Controller
==> dmesg <==
[Dec13 14:14] Booted with the nomodeset parameter. Only the system framebuffer will be available
[ +0.000011] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.001357] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +0.010383] (rpcbind)[120]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
[ +0.784672] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
[ +0.000030] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
[ +0.000003] NFSD: Unable to initialize client recovery tracking! (-2)
[ +0.155485] kauditd_printk_skb: 1 callbacks suppressed
[ +0.144137] kauditd_printk_skb: 393 callbacks suppressed
[ +1.726112] kauditd_printk_skb: 161 callbacks suppressed
[Dec13 14:15] kauditd_printk_skb: 110 callbacks suppressed
[ +0.000056] kauditd_printk_skb: 110 callbacks suppressed
[ +5.837184] kauditd_printk_skb: 223 callbacks suppressed
[ +0.228037] kauditd_printk_skb: 72 callbacks suppressed
[Dec13 14:16] kauditd_printk_skb: 35 callbacks suppressed
==> etcd [421c3cd80026] <==
{"level":"warn","ts":"2025-12-13T14:13:15.360075Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:45152","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:13:15.366738Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:45172","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:13:15.382219Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:45192","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:13:15.388352Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:45214","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:13:15.481328Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:45230","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-12-13T14:13:30.479876Z","caller":"traceutil/trace.go:172","msg":"trace[1833513221] transaction","detail":"{read_only:false; response_revision:438; number_of_response:1; }","duration":"124.821045ms","start":"2025-12-13T14:13:30.354990Z","end":"2025-12-13T14:13:30.479811Z","steps":["trace[1833513221] 'process raft request' (duration: 124.585013ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:13:30.758630Z","caller":"traceutil/trace.go:172","msg":"trace[2140602732] transaction","detail":"{read_only:false; response_revision:439; number_of_response:1; }","duration":"136.602392ms","start":"2025-12-13T14:13:30.622014Z","end":"2025-12-13T14:13:30.758616Z","steps":["trace[2140602732] 'process raft request' (duration: 136.409305ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:14:15.200825Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
{"level":"info","ts":"2025-12-13T14:14:15.202393Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"no-preload-480987","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.249:2380"],"advertise-client-urls":["https://192.168.83.249:2379"]}
{"level":"error","ts":"2025-12-13T14:14:15.202578Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-12-13T14:14:22.207006Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-12-13T14:14:22.210578Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-13T14:14:22.210910Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"f03e5af8f7ea6d24","current-leader-member-id":"f03e5af8f7ea6d24"}
{"level":"info","ts":"2025-12-13T14:14:22.211541Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
{"level":"info","ts":"2025-12-13T14:14:22.211817Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
{"level":"warn","ts":"2025-12-13T14:14:22.214632Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"warn","ts":"2025-12-13T14:14:22.214878Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"error","ts":"2025-12-13T14:14:22.214910Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"warn","ts":"2025-12-13T14:14:22.215259Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.83.249:2379: use of closed network connection"}
{"level":"warn","ts":"2025-12-13T14:14:22.215416Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.83.249:2379: use of closed network connection"}
{"level":"error","ts":"2025-12-13T14:14:22.215558Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.249:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-13T14:14:22.218997Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.83.249:2380"}
{"level":"error","ts":"2025-12-13T14:14:22.219273Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.249:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-13T14:14:22.219421Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.83.249:2380"}
{"level":"info","ts":"2025-12-13T14:14:22.219571Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"no-preload-480987","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.249:2380"],"advertise-client-urls":["https://192.168.83.249:2379"]}
==> etcd [abc673268b8c] <==
{"level":"warn","ts":"2025-12-13T14:15:00.561226Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49132","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.567555Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49136","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.582549Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49164","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.597405Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49196","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.610812Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49228","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.623256Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49264","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.636624Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49268","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.646981Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49282","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.659299Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49294","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.682561Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49298","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.687891Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49324","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.715178Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49346","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.740572Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49378","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.754560Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49402","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.765201Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49418","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.833533Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49436","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-12-13T14:15:16.036041Z","caller":"traceutil/trace.go:172","msg":"trace[398394203] linearizableReadLoop","detail":"{readStateIndex:775; appliedIndex:775; }","duration":"193.985562ms","start":"2025-12-13T14:15:15.842027Z","end":"2025-12-13T14:15:16.036013Z","steps":["trace[398394203] 'read index received' (duration: 193.980301ms)","trace[398394203] 'applied index is now lower than readState.Index' (duration: 4.69µs)"],"step_count":2}
{"level":"warn","ts":"2025-12-13T14:15:16.036309Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"194.210969ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-12-13T14:15:16.036359Z","caller":"traceutil/trace.go:172","msg":"trace[742166215] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:727; }","duration":"194.326868ms","start":"2025-12-13T14:15:15.842021Z","end":"2025-12-13T14:15:16.036348Z","steps":["trace[742166215] 'agreement among raft nodes before linearized reading' (duration: 194.179953ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:15:21.850284Z","caller":"traceutil/trace.go:172","msg":"trace[1856919751] transaction","detail":"{read_only:false; response_revision:752; number_of_response:1; }","duration":"119.966499ms","start":"2025-12-13T14:15:21.730293Z","end":"2025-12-13T14:15:21.850259Z","steps":["trace[1856919751] 'process raft request' (duration: 119.771866ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:15:21.870555Z","caller":"traceutil/trace.go:172","msg":"trace[1096756970] transaction","detail":"{read_only:false; response_revision:753; number_of_response:1; }","duration":"139.221775ms","start":"2025-12-13T14:15:21.731316Z","end":"2025-12-13T14:15:21.870538Z","steps":["trace[1096756970] 'process raft request' (duration: 139.104459ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:15:22.391408Z","caller":"traceutil/trace.go:172","msg":"trace[135788815] linearizableReadLoop","detail":"{readStateIndex:807; appliedIndex:807; }","duration":"118.341068ms","start":"2025-12-13T14:15:22.273045Z","end":"2025-12-13T14:15:22.391386Z","steps":["trace[135788815] 'read index received' (duration: 118.33172ms)","trace[135788815] 'applied index is now lower than readState.Index' (duration: 8.341µs)"],"step_count":2}
{"level":"warn","ts":"2025-12-13T14:15:22.391569Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"118.496908ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-12-13T14:15:22.391603Z","caller":"traceutil/trace.go:172","msg":"trace[588260666] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:757; }","duration":"118.552652ms","start":"2025-12-13T14:15:22.273037Z","end":"2025-12-13T14:15:22.391589Z","steps":["trace[588260666] 'agreement among raft nodes before linearized reading' (duration: 118.470061ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:15:22.391585Z","caller":"traceutil/trace.go:172","msg":"trace[600189230] transaction","detail":"{read_only:false; response_revision:758; number_of_response:1; }","duration":"155.435049ms","start":"2025-12-13T14:15:22.236137Z","end":"2025-12-13T14:15:22.391572Z","steps":["trace[600189230] 'process raft request' (duration: 155.304345ms)"],"step_count":1}
==> kernel <==
14:16:09 up 1 min, 0 users, load average: 2.14, 0.81, 0.30
Linux no-preload-480987 6.6.95 #1 SMP PREEMPT_DYNAMIC Sat Dec 13 11:18:23 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2025.02"
==> kube-apiserver [15efb3b31473] <==
W1213 14:14:24.445471 1 logging.go:55] [core] [Channel #111 SubChannel #113]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.581281 1 logging.go:55] [core] [Channel #47 SubChannel #49]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.594077 1 logging.go:55] [core] [Channel #195 SubChannel #197]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.594172 1 logging.go:55] [core] [Channel #247 SubChannel #249]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.618785 1 logging.go:55] [core] [Channel #123 SubChannel #125]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.664786 1 logging.go:55] [core] [Channel #223 SubChannel #225]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.697063 1 logging.go:55] [core] [Channel #131 SubChannel #133]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.708234 1 logging.go:55] [core] [Channel #127 SubChannel #129]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.756333 1 logging.go:55] [core] [Channel #87 SubChannel #89]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.762851 1 logging.go:55] [core] [Channel #75 SubChannel #77]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.774633 1 logging.go:55] [core] [Channel #99 SubChannel #101]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.796479 1 logging.go:55] [core] [Channel #147 SubChannel #149]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.805360 1 logging.go:55] [core] [Channel #243 SubChannel #245]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.817094 1 logging.go:55] [core] [Channel #179 SubChannel #181]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.822343 1 logging.go:55] [core] [Channel #207 SubChannel #209]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.872137 1 logging.go:55] [core] [Channel #199 SubChannel #201]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.890323 1 logging.go:55] [core] [Channel #155 SubChannel #157]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.926833 1 logging.go:55] [core] [Channel #191 SubChannel #193]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.966475 1 logging.go:55] [core] [Channel #43 SubChannel #45]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.985546 1 logging.go:55] [core] [Channel #79 SubChannel #81]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.988201 1 logging.go:55] [core] [Channel #39 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.999081 1 logging.go:55] [core] [Channel #139 SubChannel #141]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:25.021870 1 logging.go:55] [core] [Channel #115 SubChannel #117]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:25.025487 1 logging.go:55] [core] [Channel #239 SubChannel #241]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:25.154191 1 logging.go:55] [core] [Channel #211 SubChannel #213]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
==> kube-apiserver [c04badbd06c5] <==
E1213 14:15:02.854728 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1213 14:15:02.855393 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
W1213 14:15:03.482345 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.83.249]
I1213 14:15:03.487471 1 controller.go:667] quota admission added evaluator for: endpoints
I1213 14:15:04.195966 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1213 14:15:04.274505 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I1213 14:15:04.337748 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1213 14:15:04.356516 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1213 14:15:05.251412 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1213 14:15:05.435965 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I1213 14:15:07.692640 1 controller.go:667] quota admission added evaluator for: namespaces
I1213 14:15:08.351748 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.102.118.59"}
I1213 14:15:08.403429 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.111.234.215"}
W1213 14:16:06.893461 1 handler_proxy.go:99] no RequestInfo found in the context
E1213 14:16:06.893817 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1213 14:16:06.893851 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
W1213 14:16:06.962470 1 handler_proxy.go:99] no RequestInfo found in the context
E1213 14:16:06.969263 1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
I1213 14:16:06.969326 1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
==> kube-controller-manager [0a4ff8bbd246] <==
I1213 14:13:24.171667 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173371 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173673 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.227517 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:13:24.171973 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173119 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173204 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173288 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173453 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173836 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.174040 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.243849 1 range_allocator.go:177] "Sending events to api server"
I1213 14:13:24.243896 1 range_allocator.go:181] "Starting range CIDR allocator"
I1213 14:13:24.243904 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:13:24.243916 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.174139 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.174232 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.174313 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.174392 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.288212 1 range_allocator.go:433] "Set node PodCIDR" node="no-preload-480987" podCIDRs=["10.244.0.0/24"]
I1213 14:13:24.328300 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.372441 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.372523 1 garbagecollector.go:166] "Garbage collector: all resource monitors have synced"
I1213 14:13:24.372530 1 garbagecollector.go:169] "Proceeding to collect garbage"
I1213 14:13:29.188585 1 node_lifecycle_controller.go:1057] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-controller-manager [f15386049dc5] <==
I1213 14:15:05.123173 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.127814 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.145713 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.145767 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.145867 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.148926 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.151356 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.153402 1 node_lifecycle_controller.go:1234] "Initializing eviction metric for zone" zone=""
I1213 14:15:05.131028 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.166930 1 garbagecollector.go:792] "failed to discover some groups" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
I1213 14:15:05.168763 1 node_lifecycle_controller.go:886] "Missing timestamp for Node. Assuming now as a timestamp" node="no-preload-480987"
I1213 14:15:05.168796 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.178980 1 node_lifecycle_controller.go:1038] "Controller detected that all Nodes are not-Ready. Entering master disruption mode"
I1213 14:15:05.275565 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:15:05.376144 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.376162 1 garbagecollector.go:166] "Garbage collector: all resource monitors have synced"
I1213 14:15:05.376168 1 garbagecollector.go:169] "Proceeding to collect garbage"
I1213 14:15:05.377426 1 shared_informer.go:377] "Caches are synced"
E1213 14:15:07.975850 1 replica_set.go:592] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-867fb5f87b\" failed with pods \"dashboard-metrics-scraper-867fb5f87b-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1213 14:15:08.023416 1 replica_set.go:592] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-867fb5f87b\" failed with pods \"dashboard-metrics-scraper-867fb5f87b-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1213 14:15:08.076776 1 replica_set.go:592] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-867fb5f87b\" failed with pods \"dashboard-metrics-scraper-867fb5f87b-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1213 14:15:08.087381 1 replica_set.go:592] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-b84665fb8\" failed with pods \"kubernetes-dashboard-b84665fb8-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
I1213 14:15:10.180464 1 node_lifecycle_controller.go:1057] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
E1213 14:16:06.970751 1 resource_quota_controller.go:460] "Error during resource discovery" err="unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: stale GroupVersion discovery: metrics.k8s.io/v1beta1" logger="UnhandledError"
I1213 14:16:06.998273 1 garbagecollector.go:792] "failed to discover some groups" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
==> kube-proxy [825b5a74aef5] <==
I1213 14:13:27.434952 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:13:27.537239 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:27.537315 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.249"]
E1213 14:13:27.541477 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1213 14:13:27.890996 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1213 14:13:27.891076 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1213 14:13:27.891101 1 server_linux.go:136] "Using iptables Proxier"
I1213 14:13:28.046345 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1213 14:13:28.047596 1 server.go:529] "Version info" version="v1.35.0-beta.0"
I1213 14:13:28.047613 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1213 14:13:28.069007 1 config.go:200] "Starting service config controller"
I1213 14:13:28.069351 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1213 14:13:28.069786 1 config.go:106] "Starting endpoint slice config controller"
I1213 14:13:28.069797 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1213 14:13:28.084631 1 config.go:403] "Starting serviceCIDR config controller"
I1213 14:13:28.084652 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1213 14:13:28.092180 1 config.go:309] "Starting node config controller"
I1213 14:13:28.092221 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1213 14:13:28.092229 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1213 14:13:28.172328 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1213 14:13:28.172494 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1213 14:13:28.185119 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
==> kube-proxy [d56ac35f2023] <==
I1213 14:15:04.179196 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:15:04.280524 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:04.282161 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.249"]
E1213 14:15:04.282304 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1213 14:15:04.416551 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1213 14:15:04.416804 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1213 14:15:04.416966 1 server_linux.go:136] "Using iptables Proxier"
I1213 14:15:04.483468 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1213 14:15:04.486426 1 server.go:529] "Version info" version="v1.35.0-beta.0"
I1213 14:15:04.486470 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1213 14:15:04.514733 1 config.go:200] "Starting service config controller"
I1213 14:15:04.514829 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1213 14:15:04.514848 1 config.go:106] "Starting endpoint slice config controller"
I1213 14:15:04.514852 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1213 14:15:04.514869 1 config.go:403] "Starting serviceCIDR config controller"
I1213 14:15:04.514873 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1213 14:15:04.531338 1 config.go:309] "Starting node config controller"
I1213 14:15:04.547621 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1213 14:15:04.549356 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1213 14:15:04.619402 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1213 14:15:04.632403 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1213 14:15:04.632548 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
==> kube-scheduler [bb9406d173c8] <==
I1213 14:14:59.768053 1 serving.go:386] Generated self-signed cert in-memory
W1213 14:15:01.618693 1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W1213 14:15:01.618832 1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W1213 14:15:01.618857 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
W1213 14:15:01.619158 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1213 14:15:01.741589 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.35.0-beta.0"
I1213 14:15:01.741634 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1213 14:15:01.749900 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1213 14:15:01.755671 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1213 14:15:01.758170 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:15:01.758530 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1213 14:15:01.859577 1 shared_informer.go:377] "Caches are synced"
==> kube-scheduler [dbcd28d379e9] <==
E1213 14:13:17.759489 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot watch resource \"replicationcontrollers\" in API group \"\" at the cluster scope"
E1213 14:13:17.761454 1 reflector.go:204] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.ReplicationController"
E1213 14:13:17.807793 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot watch resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope"
E1213 14:13:17.810508 1 reflector.go:204] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.StorageClass"
E1213 14:13:17.828149 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot watch resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope"
E1213 14:13:17.830273 1 reflector.go:204] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.PersistentVolumeClaim"
E1213 14:13:17.838735 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot watch resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope"
E1213 14:13:17.842088 1 reflector.go:204] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.CSINode"
E1213 14:13:17.864932 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot watch resource \"configmaps\" in API group \"\" in the namespace \"kube-system\""
E1213 14:13:17.868183 1 reflector.go:204] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1693" type="*v1.ConfigMap"
E1213 14:13:17.872924 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot watch resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope"
E1213 14:13:17.874635 1 reflector.go:204] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.DeviceClass"
E1213 14:13:17.963042 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="services is forbidden: User \"system:kube-scheduler\" cannot watch resource \"services\" in API group \"\" at the cluster scope"
E1213 14:13:17.965851 1 reflector.go:204] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.Service"
E1213 14:13:17.991884 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="pods is forbidden: User \"system:kube-scheduler\" cannot watch resource \"pods\" in API group \"\" at the cluster scope"
E1213 14:13:17.995477 1 reflector.go:204] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.Pod"
E1213 14:13:18.019764 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot watch resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope"
E1213 14:13:18.022894 1 reflector.go:204] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.CSIStorageCapacity"
E1213 14:13:18.028239 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot watch resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope"
E1213 14:13:18.030500 1 reflector.go:204] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.ResourceSlice"
I1213 14:13:19.979206 1 shared_informer.go:377] "Caches are synced"
I1213 14:14:15.158169 1 secure_serving.go:259] Stopped listening on 127.0.0.1:10259
I1213 14:14:15.161149 1 server.go:263] "[graceful-termination] secure server has stopped listening"
I1213 14:14:15.161158 1 server.go:265] "[graceful-termination] secure server is exiting"
E1213 14:14:15.161189 1 run.go:72] "command failed" err="finished without leader elect"
==> kubelet <==
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.518425 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d2ea632f31ec6ee33b64b33739a273e0-k8s-certs\") pod \"kube-controller-manager-no-preload-480987\" (UID: \"d2ea632f31ec6ee33b64b33739a273e0\") " pod="kube-system/kube-controller-manager-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.518449 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/d2ea632f31ec6ee33b64b33739a273e0-kubeconfig\") pod \"kube-controller-manager-no-preload-480987\" (UID: \"d2ea632f31ec6ee33b64b33739a273e0\") " pod="kube-system/kube-controller-manager-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.553159 4407 apiserver.go:52] "Watching apiserver"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.619393 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/04edfc90843076c87e63de2a69653f0a-k8s-certs\") pod \"kube-apiserver-no-preload-480987\" (UID: \"04edfc90843076c87e63de2a69653f0a\") " pod="kube-system/kube-apiserver-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.619768 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/04edfc90843076c87e63de2a69653f0a-usr-share-ca-certificates\") pod \"kube-apiserver-no-preload-480987\" (UID: \"04edfc90843076c87e63de2a69653f0a\") " pod="kube-system/kube-apiserver-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.619803 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/6eb935396208fda222fa24605b775590-etcd-certs\") pod \"etcd-no-preload-480987\" (UID: \"6eb935396208fda222fa24605b775590\") " pod="kube-system/etcd-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.619860 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/04edfc90843076c87e63de2a69653f0a-ca-certs\") pod \"kube-apiserver-no-preload-480987\" (UID: \"04edfc90843076c87e63de2a69653f0a\") " pod="kube-system/kube-apiserver-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.619874 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/6eb935396208fda222fa24605b775590-etcd-data\") pod \"etcd-no-preload-480987\" (UID: \"6eb935396208fda222fa24605b775590\") " pod="kube-system/etcd-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.660740 4407 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.720736 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9fc17afa-ac4f-4744-b380-dff0bf9d5f12-lib-modules\") pod \"kube-proxy-bcqzf\" (UID: \"9fc17afa-ac4f-4744-b380-dff0bf9d5f12\") " pod="kube-system/kube-proxy-bcqzf"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.720913 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/9fc17afa-ac4f-4744-b380-dff0bf9d5f12-xtables-lock\") pod \"kube-proxy-bcqzf\" (UID: \"9fc17afa-ac4f-4744-b380-dff0bf9d5f12\") " pod="kube-system/kube-proxy-bcqzf"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.720958 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/45d578da-7a44-4ee1-8dd2-77c3d4816633-tmp\") pod \"storage-provisioner\" (UID: \"45d578da-7a44-4ee1-8dd2-77c3d4816633\") " pod="kube-system/storage-provisioner"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.871656 4407 scope.go:122] "RemoveContainer" containerID="7731d9ba696bc48dd0037f538a0957012f30009a9e05e971c946977be10ff36b"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: E1213 14:16:08.922718 4407 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: E1213 14:16:08.922803 4407 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: E1213 14:16:08.923293 4407 kuberuntime_manager.go:1664] "Unhandled Error" err="container metrics-server start failed in pod metrics-server-5d785b57d4-5xl42_kube-system(8be1da2d-1636-4055-8e9d-5ff3844c3e45): ErrImagePull: Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" logger="UnhandledError"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: E1213 14:16:08.923332 4407 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"metrics-server\" with ErrImagePull: \"Error response from daemon: Get \\\"https://fake.domain/v2/\\\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host\"" pod="kube-system/metrics-server-5d785b57d4-5xl42" podUID="8be1da2d-1636-4055-8e9d-5ff3844c3e45"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.152638 4407 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.152706 4407 kuberuntime_image.go:43] "Failed to pull image" err="Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.153039 4407 kuberuntime_manager.go:1664] "Unhandled Error" err="container dashboard-metrics-scraper start failed in pod dashboard-metrics-scraper-867fb5f87b-nkc9p_kubernetes-dashboard(9ed00e7f-fb97-46c1-bba9-05beb5234b7e): ErrImagePull: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" logger="UnhandledError"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.153476 4407 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ErrImagePull: \"Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-867fb5f87b-nkc9p" podUID="9ed00e7f-fb97-46c1-bba9-05beb5234b7e"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.420640 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-no-preload-480987" containerName="etcd"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.425333 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-no-preload-480987" containerName="kube-apiserver"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.425655 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-no-preload-480987" containerName="kube-scheduler"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.426720 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-no-preload-480987" containerName="kube-controller-manager"
==> kubernetes-dashboard [c87ce8eecf3d] <==
2025/12/13 14:15:20 Starting overwatch
2025/12/13 14:15:20 Using namespace: kubernetes-dashboard
2025/12/13 14:15:20 Using in-cluster config to connect to apiserver
2025/12/13 14:15:20 Using secret token for csrf signing
2025/12/13 14:15:20 Initializing csrf token from kubernetes-dashboard-csrf secret
2025/12/13 14:15:20 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
2025/12/13 14:15:20 Successful initial request to the apiserver, version: v1.35.0-beta.0
2025/12/13 14:15:20 Generating JWE encryption key
2025/12/13 14:15:20 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
2025/12/13 14:15:20 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
2025/12/13 14:15:21 Initializing JWE encryption key from synchronized object
2025/12/13 14:15:21 Creating in-cluster Sidecar client
2025/12/13 14:15:21 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2025/12/13 14:15:21 Serving insecurely on HTTP port: 9090
2025/12/13 14:16:07 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
==> storage-provisioner [7731d9ba696b] <==
I1213 14:15:03.625799 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F1213 14:15:33.643923 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
==> storage-provisioner [cc149c15604e] <==
I1213 14:16:09.293714 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1213 14:16:09.325031 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1213 14:16:09.325859 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
W1213 14:16:09.334355 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
-- /stdout --
helpers_test.go:263: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-480987 -n no-preload-480987
helpers_test.go:270: (dbg) Run: kubectl --context no-preload-480987 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:281: non-running pods: metrics-server-5d785b57d4-5xl42 dashboard-metrics-scraper-867fb5f87b-nkc9p
helpers_test.go:283: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: describe non-running pods <======
helpers_test.go:286: (dbg) Run: kubectl --context no-preload-480987 describe pod metrics-server-5d785b57d4-5xl42 dashboard-metrics-scraper-867fb5f87b-nkc9p
helpers_test.go:286: (dbg) Non-zero exit: kubectl --context no-preload-480987 describe pod metrics-server-5d785b57d4-5xl42 dashboard-metrics-scraper-867fb5f87b-nkc9p: exit status 1 (86.881641ms)
** stderr **
Error from server (NotFound): pods "metrics-server-5d785b57d4-5xl42" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-867fb5f87b-nkc9p" not found
** /stderr **
helpers_test.go:288: kubectl --context no-preload-480987 describe pod metrics-server-5d785b57d4-5xl42 dashboard-metrics-scraper-867fb5f87b-nkc9p: exit status 1
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:224: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: network settings <======
helpers_test.go:231: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-480987 -n no-preload-480987
helpers_test.go:253: <<< TestStartStop/group/no-preload/serial/Pause FAILED: start of post-mortem logs <<<
helpers_test.go:254: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: minikube logs <======
helpers_test.go:256: (dbg) Run: out/minikube-linux-amd64 -p no-preload-480987 logs -n 25
helpers_test.go:256: (dbg) Done: out/minikube-linux-amd64 -p no-preload-480987 logs -n 25: (1.479419028s)
helpers_test.go:261: TestStartStop/group/no-preload/serial/Pause logs:
-- stdout --
==> Audit <==
┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ guest-719825 ssh which VBoxControl │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which wget │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which socat │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which git │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which podman │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which iptables │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which docker │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh which curl │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /data | grep /data │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/minikube | grep /var/lib/minikube │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/boot2docker | grep /var/lib/boot2docker │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/toolbox | grep /var/lib/toolbox │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/cni | grep /var/lib/cni │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/kubelet | grep /var/lib/kubelet │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh df -t ext4 /var/lib/docker | grep /var/lib/docker │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh cat /version.json │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ ssh │ guest-719825 ssh test -f /sys/kernel/btf/vmlinux && echo 'OK' || echo 'NOT FOUND' │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ delete │ -p guest-719825 │ guest-719825 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ image │ no-preload-480987 image list --format=json │ no-preload-480987 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ pause │ -p no-preload-480987 --alsologtostderr -v=1 │ no-preload-480987 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ addons │ enable metrics-server -p newest-cni-994510 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain │ newest-cni-994510 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:15 UTC │
│ stop │ -p newest-cni-994510 --alsologtostderr -v=3 │ newest-cni-994510 │ jenkins │ v1.37.0 │ 13 Dec 25 14:15 UTC │ 13 Dec 25 14:16 UTC │
│ addons │ enable dashboard -p newest-cni-994510 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 │ newest-cni-994510 │ jenkins │ v1.37.0 │ 13 Dec 25 14:16 UTC │ 13 Dec 25 14:16 UTC │
│ start │ -p newest-cni-994510 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=kvm2 --kubernetes-version=v1.35.0-beta.0 │ newest-cni-994510 │ jenkins │ v1.37.0 │ 13 Dec 25 14:16 UTC │ │
│ unpause │ -p no-preload-480987 --alsologtostderr -v=1 │ no-preload-480987 │ jenkins │ v1.37.0 │ 13 Dec 25 14:16 UTC │ 13 Dec 25 14:16 UTC │
└─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/12/13 14:16:01
Running on machine: ubuntu-20-agent-3
Binary: Built with gc go1.25.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1213 14:16:01.125524 65660 out.go:360] Setting OutFile to fd 1 ...
I1213 14:16:01.125796 65660 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1213 14:16:01.125808 65660 out.go:374] Setting ErrFile to fd 2...
I1213 14:16:01.125813 65660 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1213 14:16:01.126005 65660 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22122-16298/.minikube/bin
I1213 14:16:01.126504 65660 out.go:368] Setting JSON to false
I1213 14:16:01.127470 65660 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":7112,"bootTime":1765628249,"procs":195,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1045-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1213 14:16:01.127542 65660 start.go:143] virtualization: kvm guest
I1213 14:16:01.130102 65660 out.go:179] * [newest-cni-994510] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1213 14:16:01.131794 65660 notify.go:221] Checking for updates...
I1213 14:16:01.131884 65660 out.go:179] - MINIKUBE_LOCATION=22122
I1213 14:16:01.133773 65660 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1213 14:16:01.135572 65660 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22122-16298/kubeconfig
I1213 14:16:01.137334 65660 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22122-16298/.minikube
I1213 14:16:01.138729 65660 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1213 14:16:01.140547 65660 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1213 14:16:01.142283 65660 config.go:182] Loaded profile config "newest-cni-994510": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0-beta.0
I1213 14:16:01.142955 65660 driver.go:422] Setting default libvirt URI to qemu:///system
I1213 14:16:01.181268 65660 out.go:179] * Using the kvm2 driver based on existing profile
I1213 14:16:01.182751 65660 start.go:309] selected driver: kvm2
I1213 14:16:01.182778 65660 start.go:927] validating driver "kvm2" against &{Name:newest-cni-994510 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22122/minikube-v1.37.0-1765613186-22122-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765275396-22083@sha256:ffa93f7bad1d2c0a7acfa6e97f1eec0e4955680d08c3904e49db297a10f7f89f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:newest-cni-994510 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.114 Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1213 14:16:01.182906 65660 start.go:938] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1213 14:16:01.183932 65660 start_flags.go:1011] Waiting for components: map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true]
I1213 14:16:01.183971 65660 cni.go:84] Creating CNI manager for ""
I1213 14:16:01.184040 65660 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1213 14:16:01.184077 65660 start.go:353] cluster config:
{Name:newest-cni-994510 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22122/minikube-v1.37.0-1765613186-22122-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765275396-22083@sha256:ffa93f7bad1d2c0a7acfa6e97f1eec0e4955680d08c3904e49db297a10f7f89f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-beta.0 ClusterName:newest-cni-994510 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.114 Port:8443 KubernetesVersion:v1.35.0-beta.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1213 14:16:01.184172 65660 iso.go:125] acquiring lock: {Name:mkdb244ed0b6c01d7604ff94d6687c3511cb9170 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1213 14:16:01.186634 65660 out.go:179] * Starting "newest-cni-994510" primary control-plane node in "newest-cni-994510" cluster
I1213 14:16:01.188000 65660 preload.go:188] Checking if preload exists for k8s version v1.35.0-beta.0 and runtime docker
I1213 14:16:01.188043 65660 preload.go:203] Found local preload: /home/jenkins/minikube-integration/22122-16298/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-beta.0-docker-overlay2-amd64.tar.lz4
I1213 14:16:01.188051 65660 cache.go:65] Caching tarball of preloaded images
I1213 14:16:01.188175 65660 preload.go:238] Found /home/jenkins/minikube-integration/22122-16298/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-beta.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1213 14:16:01.188192 65660 cache.go:68] Finished verifying existence of preloaded tar for v1.35.0-beta.0 on docker
I1213 14:16:01.188372 65660 profile.go:143] Saving config to /home/jenkins/minikube-integration/22122-16298/.minikube/profiles/newest-cni-994510/config.json ...
I1213 14:16:01.188674 65660 start.go:360] acquireMachinesLock for newest-cni-994510: {Name:mkb4e7ea4da4358e2127ad51f1ac2815f0b79c60 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1213 14:16:01.188729 65660 start.go:364] duration metric: took 30.792µs to acquireMachinesLock for "newest-cni-994510"
I1213 14:16:01.188745 65660 start.go:96] Skipping create...Using existing machine configuration
I1213 14:16:01.188750 65660 fix.go:54] fixHost starting:
I1213 14:16:01.191035 65660 fix.go:112] recreateIfNeeded on newest-cni-994510: state=Stopped err=<nil>
W1213 14:16:01.191077 65660 fix.go:138] unexpected machine state, will restart: <nil>
I1213 14:15:59.453385 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1213 14:15:59.453464 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:01.193182 65660 out.go:252] * Restarting existing kvm2 VM for "newest-cni-994510" ...
I1213 14:16:01.193250 65660 main.go:143] libmachine: starting domain...
I1213 14:16:01.193262 65660 main.go:143] libmachine: ensuring networks are active...
I1213 14:16:01.194575 65660 main.go:143] libmachine: Ensuring network default is active
I1213 14:16:01.195131 65660 main.go:143] libmachine: Ensuring network mk-newest-cni-994510 is active
I1213 14:16:01.195757 65660 main.go:143] libmachine: getting domain XML...
I1213 14:16:01.197197 65660 main.go:143] libmachine: starting domain XML:
<domain type='kvm'>
<name>newest-cni-994510</name>
<uuid>30fbdf00-43d2-4fb6-8630-f0db2bc365e5</uuid>
<memory unit='KiB'>3145728</memory>
<currentMemory unit='KiB'>3145728</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/home/jenkins/minikube-integration/22122-16298/.minikube/machines/newest-cni-994510/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='2'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' io='threads'/>
<source file='/home/jenkins/minikube-integration/22122-16298/.minikube/machines/newest-cni-994510/newest-cni-994510.rawdisk'/>
<target dev='hda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</controller>
<interface type='network'>
<mac address='52:54:00:44:4a:b4'/>
<source network='mk-newest-cni-994510'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:5a:df:a1'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<audio id='1' type='none'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</rng>
</devices>
</domain>
I1213 14:16:02.595304 65660 main.go:143] libmachine: waiting for domain to start...
I1213 14:16:02.596894 65660 main.go:143] libmachine: domain is now running
I1213 14:16:02.596945 65660 main.go:143] libmachine: waiting for IP...
I1213 14:16:02.597844 65660 main.go:143] libmachine: domain newest-cni-994510 has defined MAC address 52:54:00:44:4a:b4 in network mk-newest-cni-994510
I1213 14:16:02.598831 65660 main.go:143] libmachine: domain newest-cni-994510 has current primary IP address 192.168.72.114 and MAC address 52:54:00:44:4a:b4 in network mk-newest-cni-994510
I1213 14:16:02.598852 65660 main.go:143] libmachine: found domain IP: 192.168.72.114
I1213 14:16:02.598859 65660 main.go:143] libmachine: reserving static IP address...
I1213 14:16:02.599517 65660 main.go:143] libmachine: found host DHCP lease matching {name: "newest-cni-994510", mac: "52:54:00:44:4a:b4", ip: "192.168.72.114"} in network mk-newest-cni-994510: {Iface:virbr4 ExpiryTime:2025-12-13 15:15:09 +0000 UTC Type:0 Mac:52:54:00:44:4a:b4 Iaid: IPaddr:192.168.72.114 Prefix:24 Hostname:newest-cni-994510 Clientid:01:52:54:00:44:4a:b4}
I1213 14:16:02.599551 65660 main.go:143] libmachine: skip adding static IP to network mk-newest-cni-994510 - found existing host DHCP lease matching {name: "newest-cni-994510", mac: "52:54:00:44:4a:b4", ip: "192.168.72.114"}
I1213 14:16:02.599560 65660 main.go:143] libmachine: reserved static IP address 192.168.72.114 for domain newest-cni-994510
I1213 14:16:02.599566 65660 main.go:143] libmachine: waiting for SSH...
I1213 14:16:02.599571 65660 main.go:143] libmachine: Getting to WaitForSSH function...
I1213 14:16:02.602167 65660 main.go:143] libmachine: domain newest-cni-994510 has defined MAC address 52:54:00:44:4a:b4 in network mk-newest-cni-994510
I1213 14:16:02.602671 65660 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:44:4a:b4", ip: ""} in network mk-newest-cni-994510: {Iface:virbr4 ExpiryTime:2025-12-13 15:15:09 +0000 UTC Type:0 Mac:52:54:00:44:4a:b4 Iaid: IPaddr:192.168.72.114 Prefix:24 Hostname:newest-cni-994510 Clientid:01:52:54:00:44:4a:b4}
I1213 14:16:02.602700 65660 main.go:143] libmachine: domain newest-cni-994510 has defined IP address 192.168.72.114 and MAC address 52:54:00:44:4a:b4 in network mk-newest-cni-994510
I1213 14:16:02.602916 65660 main.go:143] libmachine: Using SSH client type: native
I1213 14:16:02.603157 65660 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e1a0] 0x850e40 <nil> [] 0s} 192.168.72.114 22 <nil> <nil>}
I1213 14:16:02.603168 65660 main.go:143] libmachine: About to run SSH command:
exit 0
I1213 14:16:05.663680 65660 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.72.114:22: connect: no route to host
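The "no route to host" above is the expected result of probing the guest's SSH port right after the VM is restarted; libmachine keeps retrying until the dial succeeds and an `exit 0` probe runs cleanly. A minimal sketch of that kind of wait loop in Go, for illustration only (this is not minikube's actual implementation; the address and timeouts are assumed values):

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForTCP keeps dialing addr until something accepts the connection or
// the overall deadline passes. Early "no route to host" / "connection
// refused" errors are expected while the guest is still booting.
func waitForTCP(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("%s never became reachable: %w", addr, err)
		}
		time.Sleep(2 * time.Second)
	}
}

func main() {
	// Address and timeout are placeholder values for illustration.
	if err := waitForTCP("192.168.72.114:22", 2*time.Minute); err != nil {
		fmt.Println("SSH wait failed:", err)
		return
	}
	fmt.Println("port 22 is reachable; an SSH 'exit 0' probe could follow here")
}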
I1213 14:16:04.456629 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1213 14:16:04.456680 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:05.791507 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": read tcp 192.168.61.1:54372->192.168.61.21:8444: read: connection reset by peer
I1213 14:16:05.791551 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:05.792084 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:05.944530 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:05.945425 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:06.444164 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:06.445072 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:06.944960 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:06.945938 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:07.444767 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:07.446109 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:07.944789 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:07.945707 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:08.444385 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:08.445288 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:08.945143 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:08.945902 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:09.444766 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:09.445649 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:09.944346 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:09.945163 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:10.445127 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:10.445955 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
I1213 14:16:10.944653 64658 api_server.go:253] Checking apiserver healthz at https://192.168.61.21:8444/healthz ...
I1213 14:16:10.945347 64658 api_server.go:269] stopped: https://192.168.61.21:8444/healthz: Get "https://192.168.61.21:8444/healthz": dial tcp 192.168.61.21:8444: connect: connection refused
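The repeated healthz checks from process 64658 above are a poll-until-ready loop: each probe times out, is reset, or is refused while the apiserver on 192.168.61.21:8444 restarts, and the caller simply retries on a short interval. A rough sketch of that pattern in Go, for illustration only (not the minikube implementation; the URL, interval, and timeout are assumptions, and TLS verification is skipped because the sketch has no cluster CA):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// pollHealthz probes an apiserver /healthz endpoint until it answers 200 OK
// or the deadline passes; dial errors, resets, and refusals are treated as
// "not ready yet" and retried.
func pollHealthz(url string, timeout time.Duration) error {
	client := &http.Client{
		Timeout: 2 * time.Second,
		// No cluster CA is available in this sketch, so skip verification.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil // apiserver reports healthy
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("apiserver at %s did not report healthy within %s", url, timeout)
}

func main() {
	// URL and timeout are placeholder values for illustration.
	if err := pollHealthz("https://192.168.61.21:8444/healthz", 4*time.Minute); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("healthy")
}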
==> Docker <==
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.200344652Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.200485128Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Dec 13 14:15:20 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:15:20Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.237676952Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.238207133Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.246967518Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.247009573Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:15:20 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:20.978311358Z" level=info msg="ignoring event" container=c522abf03bd68d5546f765f4b5f89231a556fd352bdc3bf6c742a5b152ef313f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 13 14:15:21 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:15:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/c1f03d7fa4950bf1999afa71cea62fd1bcf1d2684c789709041868d8f710fc0e/resolv.conf as [nameserver 10.96.0.10 search kube-system.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Dec 13 14:15:32 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:32.339669699Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Dec 13 14:15:32 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:32.408770252Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Dec 13 14:15:32 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:32.408895320Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Dec 13 14:15:32 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:15:32Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Dec 13 14:15:33 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:33.760728266Z" level=error msg="Handler for POST /v1.51/containers/7731d9ba696b/pause returned error: cannot pause container 7731d9ba696bc48dd0037f538a0957012f30009a9e05e971c946977be10ff36b: OCI runtime pause failed: container not running"
Dec 13 14:15:33 no-preload-480987 dockerd[1186]: time="2025-12-13T14:15:33.837874481Z" level=info msg="ignoring event" container=7731d9ba696bc48dd0037f538a0957012f30009a9e05e971c946977be10ff36b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 13 14:16:08 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:16:08Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Dec 13 14:16:08 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:16:08Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-9278n_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"df1ae620e7830da08877464b409a1a379127a6f2a24e16d49affeaf5da36304b\""
Dec 13 14:16:08 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:08.908764997Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:16:08 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:08.908814325Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:16:08 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:08.920308371Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Dec 13 14:16:08 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:08.920350681Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Dec 13 14:16:09 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:09.044310286Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Dec 13 14:16:09 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:09.143181834Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Dec 13 14:16:09 no-preload-480987 dockerd[1186]: time="2025-12-13T14:16:09.143362360Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Dec 13 14:16:09 no-preload-480987 cri-dockerd[1566]: time="2025-12-13T14:16:09Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
cc149c15604ed 6e38f40d628db 3 seconds ago Running storage-provisioner 2 7fe73cfac55b5 storage-provisioner kube-system
c87ce8eecf3dc kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93 52 seconds ago Running kubernetes-dashboard 0 e4c80e4356825 kubernetes-dashboard-b84665fb8-qgkp8 kubernetes-dashboard
12db3d62fa358 56cc512116c8f About a minute ago Running busybox 1 c4d19dba95faf busybox default
df6bc06c07314 aa5e3ebc0dfed About a minute ago Running coredns 1 42e2df8bc0c2a coredns-7d764666f9-vqfqb kube-system
d56ac35f2023e 8a4ded35a3eb1 About a minute ago Running kube-proxy 1 4df6888cada75 kube-proxy-bcqzf kube-system
7731d9ba696bc 6e38f40d628db About a minute ago Exited storage-provisioner 1 7fe73cfac55b5 storage-provisioner kube-system
bb9406d173c82 7bb6219ddab95 About a minute ago Running kube-scheduler 1 598ae50e4090f kube-scheduler-no-preload-480987 kube-system
abc673268b8c4 a3e246e9556e9 About a minute ago Running etcd 1 f25680d6231bd etcd-no-preload-480987 kube-system
f15386049dc5d 45f3cc72d235f About a minute ago Running kube-controller-manager 1 7c3c0ac1e767d kube-controller-manager-no-preload-480987 kube-system
c04badbd06c59 aa9d02839d8de About a minute ago Running kube-apiserver 1 894e50d9bbd2f kube-apiserver-no-preload-480987 kube-system
a753bda60e00b gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 2 minutes ago Exited busybox 0 3efacce8eff61 busybox default
a83817d1e3a19 aa5e3ebc0dfed 2 minutes ago Exited coredns 0 bbeedeba027f5 coredns-7d764666f9-vqfqb kube-system
825b5a74aef54 8a4ded35a3eb1 2 minutes ago Exited kube-proxy 0 58393cab0a018 kube-proxy-bcqzf kube-system
dbcd28d379e9d 7bb6219ddab95 2 minutes ago Exited kube-scheduler 0 3aeb2c8b83364 kube-scheduler-no-preload-480987 kube-system
421c3cd800264 a3e246e9556e9 2 minutes ago Exited etcd 0 f584e9b37f307 etcd-no-preload-480987 kube-system
0a4ff8bbd246b 45f3cc72d235f 2 minutes ago Exited kube-controller-manager 0 3a909272bcfee kube-controller-manager-no-preload-480987 kube-system
15efb3b314731 aa9d02839d8de 2 minutes ago Exited kube-apiserver 0 6cd8631e870c0 kube-apiserver-no-preload-480987 kube-system
==> coredns [a83817d1e3a1] <==
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/ready: Plugins not ready: "kubernetes"
[INFO] plugin/ready: Plugins not ready: "kubernetes"
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b
CoreDNS-1.13.1
linux/amd64, go1.25.2, 1db4568
[INFO] plugin/ready: Plugins not ready: "kubernetes"
[INFO] plugin/ready: Plugins not ready: "kubernetes"
[ERROR] plugin/kubernetes: Failed to watch
[ERROR] plugin/kubernetes: Failed to watch
[ERROR] plugin/kubernetes: Failed to watch
[INFO] Reloading
[INFO] plugin/reload: Running configuration SHA512 = ecad3ac8c72227dcf0d7a418ea5051ee155dd74d241a13c4787cc61906568517b5647c8519c78ef2c6b724422ee4b03d6cfb27e9a87140163726e83184faf782
[INFO] Reloading complete
[INFO] 127.0.0.1:48197 - 36083 "HINFO IN 948520708112921410.8802066444027197549. udp 56 false 512" NXDOMAIN qr,rd,ra 131 0.08414206s
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [df6bc06c0731] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = ecad3ac8c72227dcf0d7a418ea5051ee155dd74d241a13c4787cc61906568517b5647c8519c78ef2c6b724422ee4b03d6cfb27e9a87140163726e83184faf782
CoreDNS-1.13.1
linux/amd64, go1.25.2, 1db4568
[INFO] 127.0.0.1:41339 - 43934 "HINFO IN 5178304912045032897.7220680391157509907. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.10370268s
==> describe nodes <==
Name: no-preload-480987
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=no-preload-480987
kubernetes.io/os=linux
minikube.k8s.io/commit=142a8bd7cb3f031b5f72a3965bb211dc77d9e1a7
minikube.k8s.io/name=no-preload-480987
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_12_13T14_13_20_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 13 Dec 2025 14:13:16 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: no-preload-480987
AcquireTime: <unset>
RenewTime: Sat, 13 Dec 2025 14:16:07 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sat, 13 Dec 2025 14:16:08 +0000 Sat, 13 Dec 2025 14:13:14 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sat, 13 Dec 2025 14:16:08 +0000 Sat, 13 Dec 2025 14:13:14 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sat, 13 Dec 2025 14:16:08 +0000 Sat, 13 Dec 2025 14:13:14 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sat, 13 Dec 2025 14:16:08 +0000 Sat, 13 Dec 2025 14:15:08 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.83.249
Hostname: no-preload-480987
Capacity:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
System Info:
Machine ID: a518b2b6861e4d398d1398567a956c88
System UUID: a518b2b6-861e-4d39-8d13-98567a956c88
Boot ID: f2072675-ae25-45ab-b1ff-1c552f111941
Kernel Version: 6.6.95
OS Image: Buildroot 2025.02
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.5.2
Kubelet Version: v1.35.0-beta.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m9s
kube-system coredns-7d764666f9-vqfqb 100m (5%) 0 (0%) 70Mi (2%) 170Mi (5%) 2m47s
kube-system etcd-no-preload-480987 100m (5%) 0 (0%) 100Mi (3%) 0 (0%) 2m52s
kube-system kube-apiserver-no-preload-480987 250m (12%) 0 (0%) 0 (0%) 0 (0%) 2m52s
kube-system kube-controller-manager-no-preload-480987 200m (10%) 0 (0%) 0 (0%) 0 (0%) 2m53s
kube-system kube-proxy-bcqzf 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m47s
kube-system kube-scheduler-no-preload-480987 100m (5%) 0 (0%) 0 (0%) 0 (0%) 2m52s
kube-system metrics-server-5d785b57d4-5xl42 100m (5%) 0 (0%) 200Mi (6%) 0 (0%) 119s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m44s
kubernetes-dashboard dashboard-metrics-scraper-867fb5f87b-nkc9p 0 (0%) 0 (0%) 0 (0%) 0 (0%) 64s
kubernetes-dashboard kubernetes-dashboard-b84665fb8-qgkp8 0 (0%) 0 (0%) 0 (0%) 0 (0%) 64s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 0 (0%)
memory 370Mi (12%) 170Mi (5%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal RegisteredNode 2m48s node-controller Node no-preload-480987 event: Registered Node no-preload-480987 in Controller
Normal RegisteredNode 67s node-controller Node no-preload-480987 event: Registered Node no-preload-480987 in Controller
==> dmesg <==
[Dec13 14:14] Booted with the nomodeset parameter. Only the system framebuffer will be available
[ +0.000011] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.001357] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +0.010383] (rpcbind)[120]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
[ +0.784672] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
[ +0.000030] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
[ +0.000003] NFSD: Unable to initialize client recovery tracking! (-2)
[ +0.155485] kauditd_printk_skb: 1 callbacks suppressed
[ +0.144137] kauditd_printk_skb: 393 callbacks suppressed
[ +1.726112] kauditd_printk_skb: 161 callbacks suppressed
[Dec13 14:15] kauditd_printk_skb: 110 callbacks suppressed
[ +0.000056] kauditd_printk_skb: 110 callbacks suppressed
[ +5.837184] kauditd_printk_skb: 223 callbacks suppressed
[ +0.228037] kauditd_printk_skb: 72 callbacks suppressed
[Dec13 14:16] kauditd_printk_skb: 35 callbacks suppressed
==> etcd [421c3cd80026] <==
{"level":"warn","ts":"2025-12-13T14:13:15.360075Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:45152","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:13:15.366738Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:45172","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:13:15.382219Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:45192","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:13:15.388352Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:45214","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:13:15.481328Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:45230","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-12-13T14:13:30.479876Z","caller":"traceutil/trace.go:172","msg":"trace[1833513221] transaction","detail":"{read_only:false; response_revision:438; number_of_response:1; }","duration":"124.821045ms","start":"2025-12-13T14:13:30.354990Z","end":"2025-12-13T14:13:30.479811Z","steps":["trace[1833513221] 'process raft request' (duration: 124.585013ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:13:30.758630Z","caller":"traceutil/trace.go:172","msg":"trace[2140602732] transaction","detail":"{read_only:false; response_revision:439; number_of_response:1; }","duration":"136.602392ms","start":"2025-12-13T14:13:30.622014Z","end":"2025-12-13T14:13:30.758616Z","steps":["trace[2140602732] 'process raft request' (duration: 136.409305ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:14:15.200825Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
{"level":"info","ts":"2025-12-13T14:14:15.202393Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"no-preload-480987","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.249:2380"],"advertise-client-urls":["https://192.168.83.249:2379"]}
{"level":"error","ts":"2025-12-13T14:14:15.202578Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-12-13T14:14:22.207006Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-12-13T14:14:22.210578Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-13T14:14:22.210910Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"f03e5af8f7ea6d24","current-leader-member-id":"f03e5af8f7ea6d24"}
{"level":"info","ts":"2025-12-13T14:14:22.211541Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
{"level":"info","ts":"2025-12-13T14:14:22.211817Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
{"level":"warn","ts":"2025-12-13T14:14:22.214632Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"warn","ts":"2025-12-13T14:14:22.214878Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"error","ts":"2025-12-13T14:14:22.214910Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"warn","ts":"2025-12-13T14:14:22.215259Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.83.249:2379: use of closed network connection"}
{"level":"warn","ts":"2025-12-13T14:14:22.215416Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.83.249:2379: use of closed network connection"}
{"level":"error","ts":"2025-12-13T14:14:22.215558Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.249:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-13T14:14:22.218997Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.83.249:2380"}
{"level":"error","ts":"2025-12-13T14:14:22.219273Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.249:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-13T14:14:22.219421Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.83.249:2380"}
{"level":"info","ts":"2025-12-13T14:14:22.219571Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"no-preload-480987","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.249:2380"],"advertise-client-urls":["https://192.168.83.249:2379"]}
==> etcd [abc673268b8c] <==
{"level":"warn","ts":"2025-12-13T14:15:00.561226Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49132","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.567555Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49136","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.582549Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49164","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.597405Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49196","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.610812Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49228","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.623256Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49264","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.636624Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49268","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.646981Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49282","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.659299Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49294","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.682561Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49298","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.687891Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49324","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.715178Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49346","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.740572Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49378","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.754560Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49402","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.765201Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49418","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-12-13T14:15:00.833533Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:49436","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-12-13T14:15:16.036041Z","caller":"traceutil/trace.go:172","msg":"trace[398394203] linearizableReadLoop","detail":"{readStateIndex:775; appliedIndex:775; }","duration":"193.985562ms","start":"2025-12-13T14:15:15.842027Z","end":"2025-12-13T14:15:16.036013Z","steps":["trace[398394203] 'read index received' (duration: 193.980301ms)","trace[398394203] 'applied index is now lower than readState.Index' (duration: 4.69µs)"],"step_count":2}
{"level":"warn","ts":"2025-12-13T14:15:16.036309Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"194.210969ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-12-13T14:15:16.036359Z","caller":"traceutil/trace.go:172","msg":"trace[742166215] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:727; }","duration":"194.326868ms","start":"2025-12-13T14:15:15.842021Z","end":"2025-12-13T14:15:16.036348Z","steps":["trace[742166215] 'agreement among raft nodes before linearized reading' (duration: 194.179953ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:15:21.850284Z","caller":"traceutil/trace.go:172","msg":"trace[1856919751] transaction","detail":"{read_only:false; response_revision:752; number_of_response:1; }","duration":"119.966499ms","start":"2025-12-13T14:15:21.730293Z","end":"2025-12-13T14:15:21.850259Z","steps":["trace[1856919751] 'process raft request' (duration: 119.771866ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:15:21.870555Z","caller":"traceutil/trace.go:172","msg":"trace[1096756970] transaction","detail":"{read_only:false; response_revision:753; number_of_response:1; }","duration":"139.221775ms","start":"2025-12-13T14:15:21.731316Z","end":"2025-12-13T14:15:21.870538Z","steps":["trace[1096756970] 'process raft request' (duration: 139.104459ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:15:22.391408Z","caller":"traceutil/trace.go:172","msg":"trace[135788815] linearizableReadLoop","detail":"{readStateIndex:807; appliedIndex:807; }","duration":"118.341068ms","start":"2025-12-13T14:15:22.273045Z","end":"2025-12-13T14:15:22.391386Z","steps":["trace[135788815] 'read index received' (duration: 118.33172ms)","trace[135788815] 'applied index is now lower than readState.Index' (duration: 8.341µs)"],"step_count":2}
{"level":"warn","ts":"2025-12-13T14:15:22.391569Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"118.496908ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-12-13T14:15:22.391603Z","caller":"traceutil/trace.go:172","msg":"trace[588260666] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:757; }","duration":"118.552652ms","start":"2025-12-13T14:15:22.273037Z","end":"2025-12-13T14:15:22.391589Z","steps":["trace[588260666] 'agreement among raft nodes before linearized reading' (duration: 118.470061ms)"],"step_count":1}
{"level":"info","ts":"2025-12-13T14:15:22.391585Z","caller":"traceutil/trace.go:172","msg":"trace[600189230] transaction","detail":"{read_only:false; response_revision:758; number_of_response:1; }","duration":"155.435049ms","start":"2025-12-13T14:15:22.236137Z","end":"2025-12-13T14:15:22.391572Z","steps":["trace[600189230] 'process raft request' (duration: 155.304345ms)"],"step_count":1}
==> kernel <==
14:16:12 up 1 min, 0 users, load average: 2.14, 0.81, 0.30
Linux no-preload-480987 6.6.95 #1 SMP PREEMPT_DYNAMIC Sat Dec 13 11:18:23 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2025.02"
==> kube-apiserver [15efb3b31473] <==
W1213 14:14:24.445471 1 logging.go:55] [core] [Channel #111 SubChannel #113]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.581281 1 logging.go:55] [core] [Channel #47 SubChannel #49]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.594077 1 logging.go:55] [core] [Channel #195 SubChannel #197]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.594172 1 logging.go:55] [core] [Channel #247 SubChannel #249]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.618785 1 logging.go:55] [core] [Channel #123 SubChannel #125]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.664786 1 logging.go:55] [core] [Channel #223 SubChannel #225]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.697063 1 logging.go:55] [core] [Channel #131 SubChannel #133]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.708234 1 logging.go:55] [core] [Channel #127 SubChannel #129]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.756333 1 logging.go:55] [core] [Channel #87 SubChannel #89]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.762851 1 logging.go:55] [core] [Channel #75 SubChannel #77]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.774633 1 logging.go:55] [core] [Channel #99 SubChannel #101]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.796479 1 logging.go:55] [core] [Channel #147 SubChannel #149]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.805360 1 logging.go:55] [core] [Channel #243 SubChannel #245]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.817094 1 logging.go:55] [core] [Channel #179 SubChannel #181]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.822343 1 logging.go:55] [core] [Channel #207 SubChannel #209]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.872137 1 logging.go:55] [core] [Channel #199 SubChannel #201]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.890323 1 logging.go:55] [core] [Channel #155 SubChannel #157]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.926833 1 logging.go:55] [core] [Channel #191 SubChannel #193]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.966475 1 logging.go:55] [core] [Channel #43 SubChannel #45]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.985546 1 logging.go:55] [core] [Channel #79 SubChannel #81]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.988201 1 logging.go:55] [core] [Channel #39 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:24.999081 1 logging.go:55] [core] [Channel #139 SubChannel #141]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:25.021870 1 logging.go:55] [core] [Channel #115 SubChannel #117]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:25.025487 1 logging.go:55] [core] [Channel #239 SubChannel #241]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1213 14:14:25.154191 1 logging.go:55] [core] [Channel #211 SubChannel #213]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
==> kube-apiserver [c04badbd06c5] <==
E1213 14:15:02.854728 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1213 14:15:02.855393 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
W1213 14:15:03.482345 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.83.249]
I1213 14:15:03.487471 1 controller.go:667] quota admission added evaluator for: endpoints
I1213 14:15:04.195966 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1213 14:15:04.274505 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I1213 14:15:04.337748 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1213 14:15:04.356516 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1213 14:15:05.251412 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1213 14:15:05.435965 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I1213 14:15:07.692640 1 controller.go:667] quota admission added evaluator for: namespaces
I1213 14:15:08.351748 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.102.118.59"}
I1213 14:15:08.403429 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.111.234.215"}
W1213 14:16:06.893461 1 handler_proxy.go:99] no RequestInfo found in the context
E1213 14:16:06.893817 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1213 14:16:06.893851 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
W1213 14:16:06.962470 1 handler_proxy.go:99] no RequestInfo found in the context
E1213 14:16:06.969263 1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
I1213 14:16:06.969326 1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
==> kube-controller-manager [0a4ff8bbd246] <==
I1213 14:13:24.171667 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.171973 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173119 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173204 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173288 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173371 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173453 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173673 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.173836 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.174040 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.174139 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.174232 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.174313 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.174392 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.227517 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:13:24.243849 1 range_allocator.go:177] "Sending events to api server"
I1213 14:13:24.243896 1 range_allocator.go:181] "Starting range CIDR allocator"
I1213 14:13:24.243904 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:13:24.243916 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.288212 1 range_allocator.go:433] "Set node PodCIDR" node="no-preload-480987" podCIDRs=["10.244.0.0/24"]
I1213 14:13:24.328300 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.372441 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:24.372523 1 garbagecollector.go:166] "Garbage collector: all resource monitors have synced"
I1213 14:13:24.372530 1 garbagecollector.go:169] "Proceeding to collect garbage"
I1213 14:13:29.188585 1 node_lifecycle_controller.go:1057] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
==> kube-controller-manager [f15386049dc5] <==
I1213 14:15:05.123173 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.127814 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.131028 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.145713 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.145767 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.145867 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.148926 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.151356 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.153402 1 node_lifecycle_controller.go:1234] "Initializing eviction metric for zone" zone=""
I1213 14:15:05.166930 1 garbagecollector.go:792] "failed to discover some groups" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
I1213 14:15:05.168763 1 node_lifecycle_controller.go:886] "Missing timestamp for Node. Assuming now as a timestamp" node="no-preload-480987"
I1213 14:15:05.168796 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.178980 1 node_lifecycle_controller.go:1038] "Controller detected that all Nodes are not-Ready. Entering master disruption mode"
I1213 14:15:05.275565 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:15:05.376144 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:05.376162 1 garbagecollector.go:166] "Garbage collector: all resource monitors have synced"
I1213 14:15:05.376168 1 garbagecollector.go:169] "Proceeding to collect garbage"
I1213 14:15:05.377426 1 shared_informer.go:377] "Caches are synced"
E1213 14:15:07.975850 1 replica_set.go:592] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-867fb5f87b\" failed with pods \"dashboard-metrics-scraper-867fb5f87b-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1213 14:15:08.023416 1 replica_set.go:592] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-867fb5f87b\" failed with pods \"dashboard-metrics-scraper-867fb5f87b-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1213 14:15:08.076776 1 replica_set.go:592] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-867fb5f87b\" failed with pods \"dashboard-metrics-scraper-867fb5f87b-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1213 14:15:08.087381 1 replica_set.go:592] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-b84665fb8\" failed with pods \"kubernetes-dashboard-b84665fb8-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
I1213 14:15:10.180464 1 node_lifecycle_controller.go:1057] "Controller detected that some Nodes are Ready. Exiting master disruption mode"
E1213 14:16:06.970751 1 resource_quota_controller.go:460] "Error during resource discovery" err="unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: stale GroupVersion discovery: metrics.k8s.io/v1beta1" logger="UnhandledError"
I1213 14:16:06.998273 1 garbagecollector.go:792] "failed to discover some groups" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
==> kube-proxy [825b5a74aef5] <==
I1213 14:13:27.434952 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:13:27.537239 1 shared_informer.go:377] "Caches are synced"
I1213 14:13:27.537315 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.249"]
E1213 14:13:27.541477 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1213 14:13:27.890996 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1213 14:13:27.891076 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1213 14:13:27.891101 1 server_linux.go:136] "Using iptables Proxier"
I1213 14:13:28.046345 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1213 14:13:28.047596 1 server.go:529] "Version info" version="v1.35.0-beta.0"
I1213 14:13:28.047613 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1213 14:13:28.069007 1 config.go:200] "Starting service config controller"
I1213 14:13:28.069351 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1213 14:13:28.069786 1 config.go:106] "Starting endpoint slice config controller"
I1213 14:13:28.069797 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1213 14:13:28.084631 1 config.go:403] "Starting serviceCIDR config controller"
I1213 14:13:28.084652 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1213 14:13:28.092180 1 config.go:309] "Starting node config controller"
I1213 14:13:28.092221 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1213 14:13:28.092229 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1213 14:13:28.172328 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1213 14:13:28.172494 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1213 14:13:28.185119 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
==> kube-proxy [d56ac35f2023] <==
I1213 14:15:04.179196 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:15:04.280524 1 shared_informer.go:377] "Caches are synced"
I1213 14:15:04.282161 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.249"]
E1213 14:15:04.282304 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1213 14:15:04.416551 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1213 14:15:04.416804 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1213 14:15:04.416966 1 server_linux.go:136] "Using iptables Proxier"
I1213 14:15:04.483468 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1213 14:15:04.486426 1 server.go:529] "Version info" version="v1.35.0-beta.0"
I1213 14:15:04.486470 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1213 14:15:04.514733 1 config.go:200] "Starting service config controller"
I1213 14:15:04.514829 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1213 14:15:04.514848 1 config.go:106] "Starting endpoint slice config controller"
I1213 14:15:04.514852 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1213 14:15:04.514869 1 config.go:403] "Starting serviceCIDR config controller"
I1213 14:15:04.514873 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1213 14:15:04.531338 1 config.go:309] "Starting node config controller"
I1213 14:15:04.547621 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1213 14:15:04.549356 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1213 14:15:04.619402 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1213 14:15:04.632403 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1213 14:15:04.632548 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
==> kube-scheduler [bb9406d173c8] <==
I1213 14:14:59.768053 1 serving.go:386] Generated self-signed cert in-memory
W1213 14:15:01.618693 1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W1213 14:15:01.618832 1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W1213 14:15:01.618857 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
W1213 14:15:01.619158 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1213 14:15:01.741589 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.35.0-beta.0"
I1213 14:15:01.741634 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1213 14:15:01.749900 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1213 14:15:01.755671 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1213 14:15:01.758170 1 shared_informer.go:370] "Waiting for caches to sync"
I1213 14:15:01.758530 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1213 14:15:01.859577 1 shared_informer.go:377] "Caches are synced"
==> kube-scheduler [dbcd28d379e9] <==
E1213 14:13:17.759489 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot watch resource \"replicationcontrollers\" in API group \"\" at the cluster scope"
E1213 14:13:17.761454 1 reflector.go:204] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.ReplicationController"
E1213 14:13:17.807793 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot watch resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope"
E1213 14:13:17.810508 1 reflector.go:204] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.StorageClass"
E1213 14:13:17.828149 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot watch resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope"
E1213 14:13:17.830273 1 reflector.go:204] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.PersistentVolumeClaim"
E1213 14:13:17.838735 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot watch resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope"
E1213 14:13:17.842088 1 reflector.go:204] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.CSINode"
E1213 14:13:17.864932 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot watch resource \"configmaps\" in API group \"\" in the namespace \"kube-system\""
E1213 14:13:17.868183 1 reflector.go:204] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1693" type="*v1.ConfigMap"
E1213 14:13:17.872924 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot watch resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope"
E1213 14:13:17.874635 1 reflector.go:204] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.DeviceClass"
E1213 14:13:17.963042 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="services is forbidden: User \"system:kube-scheduler\" cannot watch resource \"services\" in API group \"\" at the cluster scope"
E1213 14:13:17.965851 1 reflector.go:204] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.Service"
E1213 14:13:17.991884 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="pods is forbidden: User \"system:kube-scheduler\" cannot watch resource \"pods\" in API group \"\" at the cluster scope"
E1213 14:13:17.995477 1 reflector.go:204] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.Pod"
E1213 14:13:18.019764 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot watch resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope"
E1213 14:13:18.022894 1 reflector.go:204] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.CSIStorageCapacity"
E1213 14:13:18.028239 1 reflector.go:429] "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking" err="resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot watch resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope"
E1213 14:13:18.030500 1 reflector.go:204] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:161" type="*v1.ResourceSlice"
I1213 14:13:19.979206 1 shared_informer.go:377] "Caches are synced"
I1213 14:14:15.158169 1 secure_serving.go:259] Stopped listening on 127.0.0.1:10259
I1213 14:14:15.161149 1 server.go:263] "[graceful-termination] secure server has stopped listening"
I1213 14:14:15.161158 1 server.go:265] "[graceful-termination] secure server is exiting"
E1213 14:14:15.161189 1 run.go:72] "command failed" err="finished without leader elect"
==> kubelet <==
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.619768 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/04edfc90843076c87e63de2a69653f0a-usr-share-ca-certificates\") pod \"kube-apiserver-no-preload-480987\" (UID: \"04edfc90843076c87e63de2a69653f0a\") " pod="kube-system/kube-apiserver-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.619803 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/6eb935396208fda222fa24605b775590-etcd-certs\") pod \"etcd-no-preload-480987\" (UID: \"6eb935396208fda222fa24605b775590\") " pod="kube-system/etcd-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.619860 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/04edfc90843076c87e63de2a69653f0a-ca-certs\") pod \"kube-apiserver-no-preload-480987\" (UID: \"04edfc90843076c87e63de2a69653f0a\") " pod="kube-system/kube-apiserver-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.619874 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/6eb935396208fda222fa24605b775590-etcd-data\") pod \"etcd-no-preload-480987\" (UID: \"6eb935396208fda222fa24605b775590\") " pod="kube-system/etcd-no-preload-480987"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.660740 4407 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.720736 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9fc17afa-ac4f-4744-b380-dff0bf9d5f12-lib-modules\") pod \"kube-proxy-bcqzf\" (UID: \"9fc17afa-ac4f-4744-b380-dff0bf9d5f12\") " pod="kube-system/kube-proxy-bcqzf"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.720913 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/9fc17afa-ac4f-4744-b380-dff0bf9d5f12-xtables-lock\") pod \"kube-proxy-bcqzf\" (UID: \"9fc17afa-ac4f-4744-b380-dff0bf9d5f12\") " pod="kube-system/kube-proxy-bcqzf"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.720958 4407 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/45d578da-7a44-4ee1-8dd2-77c3d4816633-tmp\") pod \"storage-provisioner\" (UID: \"45d578da-7a44-4ee1-8dd2-77c3d4816633\") " pod="kube-system/storage-provisioner"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: I1213 14:16:08.871656 4407 scope.go:122] "RemoveContainer" containerID="7731d9ba696bc48dd0037f538a0957012f30009a9e05e971c946977be10ff36b"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: E1213 14:16:08.922718 4407 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: E1213 14:16:08.922803 4407 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: E1213 14:16:08.923293 4407 kuberuntime_manager.go:1664] "Unhandled Error" err="container metrics-server start failed in pod metrics-server-5d785b57d4-5xl42_kube-system(8be1da2d-1636-4055-8e9d-5ff3844c3e45): ErrImagePull: Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" logger="UnhandledError"
Dec 13 14:16:08 no-preload-480987 kubelet[4407]: E1213 14:16:08.923332 4407 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"metrics-server\" with ErrImagePull: \"Error response from daemon: Get \\\"https://fake.domain/v2/\\\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host\"" pod="kube-system/metrics-server-5d785b57d4-5xl42" podUID="8be1da2d-1636-4055-8e9d-5ff3844c3e45"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.152638 4407 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.152706 4407 kuberuntime_image.go:43] "Failed to pull image" err="Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.153039 4407 kuberuntime_manager.go:1664] "Unhandled Error" err="container dashboard-metrics-scraper start failed in pod dashboard-metrics-scraper-867fb5f87b-nkc9p_kubernetes-dashboard(9ed00e7f-fb97-46c1-bba9-05beb5234b7e): ErrImagePull: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" logger="UnhandledError"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.153476 4407 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ErrImagePull: \"Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-867fb5f87b-nkc9p" podUID="9ed00e7f-fb97-46c1-bba9-05beb5234b7e"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.420640 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-no-preload-480987" containerName="etcd"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.425333 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-no-preload-480987" containerName="kube-apiserver"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.425655 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-no-preload-480987" containerName="kube-scheduler"
Dec 13 14:16:09 no-preload-480987 kubelet[4407]: E1213 14:16:09.426720 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-no-preload-480987" containerName="kube-controller-manager"
Dec 13 14:16:10 no-preload-480987 kubelet[4407]: E1213 14:16:10.436011 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-no-preload-480987" containerName="etcd"
Dec 13 14:16:10 no-preload-480987 kubelet[4407]: E1213 14:16:10.436948 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-no-preload-480987" containerName="kube-apiserver"
Dec 13 14:16:10 no-preload-480987 kubelet[4407]: E1213 14:16:10.437292 4407 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-no-preload-480987" containerName="kube-scheduler"
Dec 13 14:16:11 no-preload-480987 kubelet[4407]: E1213 14:16:11.576203 4407 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-vqfqb" containerName="coredns"
==> kubernetes-dashboard [c87ce8eecf3d] <==
2025/12/13 14:15:20 Starting overwatch
2025/12/13 14:15:20 Using namespace: kubernetes-dashboard
2025/12/13 14:15:20 Using in-cluster config to connect to apiserver
2025/12/13 14:15:20 Using secret token for csrf signing
2025/12/13 14:15:20 Initializing csrf token from kubernetes-dashboard-csrf secret
2025/12/13 14:15:20 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
2025/12/13 14:15:20 Successful initial request to the apiserver, version: v1.35.0-beta.0
2025/12/13 14:15:20 Generating JWE encryption key
2025/12/13 14:15:20 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
2025/12/13 14:15:20 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
2025/12/13 14:15:21 Initializing JWE encryption key from synchronized object
2025/12/13 14:15:21 Creating in-cluster Sidecar client
2025/12/13 14:15:21 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2025/12/13 14:15:21 Serving insecurely on HTTP port: 9090
2025/12/13 14:16:07 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
==> storage-provisioner [7731d9ba696b] <==
I1213 14:15:03.625799 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F1213 14:15:33.643923 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
==> storage-provisioner [cc149c15604e] <==
I1213 14:16:09.293714 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1213 14:16:09.325031 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1213 14:16:09.325859 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
W1213 14:16:09.334355 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
-- /stdout --
helpers_test.go:263: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-480987 -n no-preload-480987
helpers_test.go:270: (dbg) Run: kubectl --context no-preload-480987 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:281: non-running pods: metrics-server-5d785b57d4-5xl42 dashboard-metrics-scraper-867fb5f87b-nkc9p
helpers_test.go:283: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: describe non-running pods <======
helpers_test.go:286: (dbg) Run: kubectl --context no-preload-480987 describe pod metrics-server-5d785b57d4-5xl42 dashboard-metrics-scraper-867fb5f87b-nkc9p
helpers_test.go:286: (dbg) Non-zero exit: kubectl --context no-preload-480987 describe pod metrics-server-5d785b57d4-5xl42 dashboard-metrics-scraper-867fb5f87b-nkc9p: exit status 1 (87.043387ms)
** stderr **
Error from server (NotFound): pods "metrics-server-5d785b57d4-5xl42" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-867fb5f87b-nkc9p" not found
** /stderr **
helpers_test.go:288: kubectl --context no-preload-480987 describe pod metrics-server-5d785b57d4-5xl42 dashboard-metrics-scraper-867fb5f87b-nkc9p: exit status 1
--- FAIL: TestStartStop/group/no-preload/serial/Pause (40.36s)