=== RUN TestFunctional/serial/SoftStart
I1229 06:53:22.195105 13486 config.go:182] Loaded profile config "functional-695625": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0
functional_test.go:674: (dbg) Run: out/minikube-linux-amd64 start -p functional-695625 --alsologtostderr -v=8
E1229 06:53:43.099814 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:43.105120 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:43.115489 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:43.135883 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:43.176239 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:43.256606 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:43.417087 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:43.737769 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:44.378852 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:45.659454 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:48.219619 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:53:53.340688 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:54:03.581743 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:54:24.062620 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:55:05.023876 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:56:26.944682 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:58:43.093536 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1229 06:59:10.791976 13486 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/addons-909246/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
functional_test.go:674: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p functional-695625 --alsologtostderr -v=8: exit status 81 (6m30.107929216s)
-- stdout --
* [functional-695625] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
- MINIKUBE_LOCATION=22353
- MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
- KUBECONFIG=/home/jenkins/minikube-integration/22353-9552/kubeconfig
- MINIKUBE_HOME=/home/jenkins/minikube-integration/22353-9552/.minikube
- MINIKUBE_BIN=out/minikube-linux-amd64
- MINIKUBE_FORCE_SYSTEMD=
* Using the kvm2 driver based on existing profile
* Starting "functional-695625" primary control-plane node in "functional-695625" cluster
-- /stdout --
** stderr **
I1229 06:53:22.250786 17440 out.go:360] Setting OutFile to fd 1 ...
I1229 06:53:22.251073 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1229 06:53:22.251082 17440 out.go:374] Setting ErrFile to fd 2...
I1229 06:53:22.251087 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1229 06:53:22.251322 17440 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22353-9552/.minikube/bin
I1229 06:53:22.251807 17440 out.go:368] Setting JSON to false
I1229 06:53:22.252599 17440 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-7","uptime":2152,"bootTime":1766989050,"procs":176,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1045-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1229 06:53:22.252669 17440 start.go:143] virtualization: kvm guest
I1229 06:53:22.254996 17440 out.go:179] * [functional-695625] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1229 06:53:22.256543 17440 notify.go:221] Checking for updates...
I1229 06:53:22.256551 17440 out.go:179] - MINIKUBE_LOCATION=22353
I1229 06:53:22.258115 17440 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1229 06:53:22.259464 17440 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22353-9552/kubeconfig
I1229 06:53:22.260823 17440 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22353-9552/.minikube
I1229 06:53:22.262461 17440 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1229 06:53:22.263830 17440 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1229 06:53:22.265499 17440 config.go:182] Loaded profile config "functional-695625": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0
I1229 06:53:22.265604 17440 driver.go:422] Setting default libvirt URI to qemu:///system
I1229 06:53:22.301877 17440 out.go:179] * Using the kvm2 driver based on existing profile
I1229 06:53:22.303062 17440 start.go:309] selected driver: kvm2
I1229 06:53:22.303099 17440 start.go:928] validating driver "kvm2" against &{Name:functional-695625 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22353/minikube-v1.37.0-1766979747-22353-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766979815-22353@sha256:20dad5895b49b986a1253c0faab60865204843ac97fd3a6e6210da5896244409 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:functional-695625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.121 Port:8441 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1229 06:53:22.303255 17440 start.go:939] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1229 06:53:22.304469 17440 cni.go:84] Creating CNI manager for ""
I1229 06:53:22.304541 17440 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1229 06:53:22.304607 17440 start.go:353] cluster config:
{Name:functional-695625 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22353/minikube-v1.37.0-1766979747-22353-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766979815-22353@sha256:20dad5895b49b986a1253c0faab60865204843ac97fd3a6e6210da5896244409 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:functional-695625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.121 Port:8441 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1229 06:53:22.304716 17440 iso.go:125] acquiring lock: {Name:mk2adf09d18eb25f1d98559b1ab4af84fc4e9a54 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1229 06:53:22.306617 17440 out.go:179] * Starting "functional-695625" primary control-plane node in "functional-695625" cluster
I1229 06:53:22.307989 17440 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I1229 06:53:22.308028 17440 preload.go:203] Found local preload: /home/jenkins/minikube-integration/22353-9552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-amd64.tar.lz4
I1229 06:53:22.308037 17440 cache.go:65] Caching tarball of preloaded images
I1229 06:53:22.308172 17440 preload.go:251] Found /home/jenkins/minikube-integration/22353-9552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1229 06:53:22.308185 17440 cache.go:68] Finished verifying existence of preloaded tar for v1.35.0 on docker
I1229 06:53:22.308288 17440 profile.go:143] Saving config to /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/config.json ...
I1229 06:53:22.308499 17440 start.go:360] acquireMachinesLock for functional-695625: {Name:mk15f2078da2c2dd9529f5e9a0dd3e4cc97196c6 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1229 06:53:22.308543 17440 start.go:364] duration metric: took 25.28µs to acquireMachinesLock for "functional-695625"
I1229 06:53:22.308555 17440 start.go:96] Skipping create...Using existing machine configuration
I1229 06:53:22.308560 17440 fix.go:54] fixHost starting:
I1229 06:53:22.310738 17440 fix.go:112] recreateIfNeeded on functional-695625: state=Running err=<nil>
W1229 06:53:22.310765 17440 fix.go:138] unexpected machine state, will restart: <nil>
I1229 06:53:22.313927 17440 out.go:252] * Updating the running kvm2 "functional-695625" VM ...
I1229 06:53:22.313960 17440 machine.go:94] provisionDockerMachine start ...
I1229 06:53:22.317184 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.317690 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.317748 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.317941 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:22.318146 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:22.318156 17440 main.go:144] libmachine: About to run SSH command:
hostname
I1229 06:53:22.424049 17440 main.go:144] libmachine: SSH cmd err, output: <nil>: functional-695625
I1229 06:53:22.424102 17440 buildroot.go:166] provisioning hostname "functional-695625"
I1229 06:53:22.427148 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.427685 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.427715 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.427957 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:22.428261 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:22.428280 17440 main.go:144] libmachine: About to run SSH command:
sudo hostname functional-695625 && echo "functional-695625" | sudo tee /etc/hostname
I1229 06:53:22.552563 17440 main.go:144] libmachine: SSH cmd err, output: <nil>: functional-695625
I1229 06:53:22.555422 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.555807 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.555834 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.556061 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:22.556278 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:22.556302 17440 main.go:144] libmachine: About to run SSH command:
if ! grep -xq '.*\sfunctional-695625' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-695625/g' /etc/hosts;
else
echo '127.0.1.1 functional-695625' | sudo tee -a /etc/hosts;
fi
fi
I1229 06:53:22.661438 17440 main.go:144] libmachine: SSH cmd err, output: <nil>:
I1229 06:53:22.661470 17440 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/22353-9552/.minikube CaCertPath:/home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22353-9552/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22353-9552/.minikube}
I1229 06:53:22.661505 17440 buildroot.go:174] setting up certificates
I1229 06:53:22.661529 17440 provision.go:84] configureAuth start
I1229 06:53:22.664985 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.665439 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.665459 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.667758 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.668124 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.668145 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.668257 17440 provision.go:143] copyHostCerts
I1229 06:53:22.668280 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/22353-9552/.minikube/ca.pem
I1229 06:53:22.668308 17440 exec_runner.go:144] found /home/jenkins/minikube-integration/22353-9552/.minikube/ca.pem, removing ...
I1229 06:53:22.668317 17440 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22353-9552/.minikube/ca.pem
I1229 06:53:22.668383 17440 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22353-9552/.minikube/ca.pem (1082 bytes)
I1229 06:53:22.668476 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/22353-9552/.minikube/cert.pem
I1229 06:53:22.668505 17440 exec_runner.go:144] found /home/jenkins/minikube-integration/22353-9552/.minikube/cert.pem, removing ...
I1229 06:53:22.668512 17440 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22353-9552/.minikube/cert.pem
I1229 06:53:22.668541 17440 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22353-9552/.minikube/cert.pem (1123 bytes)
I1229 06:53:22.668582 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/22353-9552/.minikube/key.pem
I1229 06:53:22.668598 17440 exec_runner.go:144] found /home/jenkins/minikube-integration/22353-9552/.minikube/key.pem, removing ...
I1229 06:53:22.668603 17440 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22353-9552/.minikube/key.pem
I1229 06:53:22.668632 17440 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22353-9552/.minikube/key.pem (1675 bytes)
I1229 06:53:22.668676 17440 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22353-9552/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca-key.pem org=jenkins.functional-695625 san=[127.0.0.1 192.168.39.121 functional-695625 localhost minikube]
I1229 06:53:22.746489 17440 provision.go:177] copyRemoteCerts
I1229 06:53:22.746545 17440 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1229 06:53:22.749128 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.749596 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.749616 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.749757 17440 sshutil.go:53] new ssh client: &{IP:192.168.39.121 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/functional-695625/id_rsa Username:docker}
I1229 06:53:22.836885 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I1229 06:53:22.836959 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1229 06:53:22.872390 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I1229 06:53:22.872481 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1229 06:53:22.908829 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/machines/server.pem -> /etc/docker/server.pem
I1229 06:53:22.908896 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1229 06:53:22.941014 17440 provision.go:87] duration metric: took 279.457536ms to configureAuth
I1229 06:53:22.941053 17440 buildroot.go:189] setting minikube options for container-runtime
I1229 06:53:22.941277 17440 config.go:182] Loaded profile config "functional-695625": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0
I1229 06:53:22.944375 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.944857 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.944916 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.945128 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:22.945387 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:22.945402 17440 main.go:144] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1229 06:53:23.052106 17440 main.go:144] libmachine: SSH cmd err, output: <nil>: tmpfs
I1229 06:53:23.052136 17440 buildroot.go:70] root file system type: tmpfs
I1229 06:53:23.052304 17440 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1229 06:53:23.055887 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.056416 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.056446 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.056629 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:23.056893 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:23.056961 17440 main.go:144] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1229 06:53:23.183096 17440 main.go:144] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1229 06:53:23.186465 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.186943 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.187006 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.187227 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:23.187475 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:23.187494 17440 main.go:144] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1229 06:53:23.306011 17440 main.go:144] libmachine: SSH cmd err, output: <nil>:
I1229 06:53:23.306077 17440 machine.go:97] duration metric: took 992.109676ms to provisionDockerMachine
I1229 06:53:23.306099 17440 start.go:293] postStartSetup for "functional-695625" (driver="kvm2")
I1229 06:53:23.306114 17440 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1229 06:53:23.306201 17440 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1229 06:53:23.309537 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.309944 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.309967 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.310122 17440 sshutil.go:53] new ssh client: &{IP:192.168.39.121 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/functional-695625/id_rsa Username:docker}
I1229 06:53:23.393657 17440 ssh_runner.go:195] Run: cat /etc/os-release
I1229 06:53:23.398689 17440 command_runner.go:130] > NAME=Buildroot
I1229 06:53:23.398723 17440 command_runner.go:130] > VERSION=2025.02
I1229 06:53:23.398731 17440 command_runner.go:130] > ID=buildroot
I1229 06:53:23.398737 17440 command_runner.go:130] > VERSION_ID=2025.02
I1229 06:53:23.398745 17440 command_runner.go:130] > PRETTY_NAME="Buildroot 2025.02"
I1229 06:53:23.398791 17440 info.go:137] Remote host: Buildroot 2025.02
I1229 06:53:23.398821 17440 filesync.go:126] Scanning /home/jenkins/minikube-integration/22353-9552/.minikube/addons for local assets ...
I1229 06:53:23.398897 17440 filesync.go:126] Scanning /home/jenkins/minikube-integration/22353-9552/.minikube/files for local assets ...
I1229 06:53:23.398981 17440 filesync.go:149] local asset: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem -> 134862.pem in /etc/ssl/certs
I1229 06:53:23.398993 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem -> /etc/ssl/certs/134862.pem
I1229 06:53:23.399068 17440 filesync.go:149] local asset: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/test/nested/copy/13486/hosts -> hosts in /etc/test/nested/copy/13486
I1229 06:53:23.399075 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/test/nested/copy/13486/hosts -> /etc/test/nested/copy/13486/hosts
I1229 06:53:23.399114 17440 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/13486
I1229 06:53:23.412045 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem --> /etc/ssl/certs/134862.pem (1708 bytes)
I1229 06:53:23.445238 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/test/nested/copy/13486/hosts --> /etc/test/nested/copy/13486/hosts (40 bytes)
I1229 06:53:23.479048 17440 start.go:296] duration metric: took 172.930561ms for postStartSetup
I1229 06:53:23.479099 17440 fix.go:56] duration metric: took 1.170538464s for fixHost
I1229 06:53:23.482307 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.482761 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.482808 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.483049 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:23.483313 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:23.483327 17440 main.go:144] libmachine: About to run SSH command:
date +%s.%N
I1229 06:53:23.586553 17440 main.go:144] libmachine: SSH cmd err, output: <nil>: 1766991203.580410695
I1229 06:53:23.586572 17440 fix.go:216] guest clock: 1766991203.580410695
I1229 06:53:23.586579 17440 fix.go:229] Guest: 2025-12-29 06:53:23.580410695 +0000 UTC Remote: 2025-12-29 06:53:23.479103806 +0000 UTC m=+1.278853461 (delta=101.306889ms)
I1229 06:53:23.586594 17440 fix.go:200] guest clock delta is within tolerance: 101.306889ms
I1229 06:53:23.586598 17440 start.go:83] releasing machines lock for "functional-695625", held for 1.278049275s
I1229 06:53:23.590004 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.590438 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.590463 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.591074 17440 ssh_runner.go:195] Run: cat /version.json
I1229 06:53:23.591186 17440 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1229 06:53:23.594362 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.594454 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.594831 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.594868 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.594954 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.595021 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.595083 17440 sshutil.go:53] new ssh client: &{IP:192.168.39.121 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/functional-695625/id_rsa Username:docker}
I1229 06:53:23.595278 17440 sshutil.go:53] new ssh client: &{IP:192.168.39.121 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/functional-695625/id_rsa Username:docker}
I1229 06:53:23.692873 17440 command_runner.go:130] > <a href="https://github.com/kubernetes/registry.k8s.io">Temporary Redirect</a>.
I1229 06:53:23.692948 17440 command_runner.go:130] > {"iso_version": "v1.37.0-1766979747-22353", "kicbase_version": "v0.0.48-1766884053-22351", "minikube_version": "v1.37.0", "commit": "f5189b2bdbb6990e595e25e06a017f8901d29fa8"}
I1229 06:53:23.693063 17440 ssh_runner.go:195] Run: systemctl --version
I1229 06:53:23.700357 17440 command_runner.go:130] > systemd 256 (256.7)
I1229 06:53:23.700393 17440 command_runner.go:130] > -PAM -AUDIT -SELINUX -APPARMOR -IMA -SMACK +SECCOMP +GCRYPT -GNUTLS +OPENSSL +ACL +BLKID +CURL -ELFUTILS -FIDO2 -IDN2 -IDN +IPTC +KMOD -LIBCRYPTSETUP -LIBCRYPTSETUP_PLUGINS +LIBFDISK +PCRE2 -PWQUALITY -P11KIT -QRENCODE -TPM2 -BZIP2 +LZ4 +XZ +ZLIB -ZSTD -BPF_FRAMEWORK -XKBCOMMON -UTMP -SYSVINIT -LIBARCHIVE
I1229 06:53:23.700501 17440 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I1229 06:53:23.707230 17440 command_runner.go:130] ! stat: cannot statx '/etc/cni/net.d/*loopback.conf*': No such file or directory
W1229 06:53:23.707369 17440 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1229 06:53:23.707433 17440 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1229 06:53:23.719189 17440 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
I1229 06:53:23.719220 17440 start.go:496] detecting cgroup driver to use...
I1229 06:53:23.719246 17440 start.go:519] Kubernetes 1.35.0+ detected, using "systemd" cgroup driver
I1229 06:53:23.719351 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1229 06:53:23.744860 17440 command_runner.go:130] > runtime-endpoint: unix:///run/containerd/containerd.sock
I1229 06:53:23.744940 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1229 06:53:23.758548 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1229 06:53:23.773051 17440 containerd.go:147] configuring containerd to use "systemd" as cgroup driver...
I1229 06:53:23.773122 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1229 06:53:23.786753 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1229 06:53:23.800393 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1229 06:53:23.813395 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1229 06:53:23.826600 17440 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1229 06:53:23.840992 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1229 06:53:23.854488 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1229 06:53:23.869084 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1229 06:53:23.882690 17440 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1229 06:53:23.894430 17440 command_runner.go:130] > net.bridge.bridge-nf-call-iptables = 1
I1229 06:53:23.894542 17440 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1229 06:53:23.912444 17440 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1229 06:53:24.139583 17440 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1229 06:53:24.191402 17440 start.go:496] detecting cgroup driver to use...
I1229 06:53:24.191457 17440 start.go:519] Kubernetes 1.35.0+ detected, using "systemd" cgroup driver
I1229 06:53:24.191521 17440 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1229 06:53:24.217581 17440 command_runner.go:130] > # /usr/lib/systemd/system/docker.service
I1229 06:53:24.217604 17440 command_runner.go:130] > [Unit]
I1229 06:53:24.217609 17440 command_runner.go:130] > Description=Docker Application Container Engine
I1229 06:53:24.217615 17440 command_runner.go:130] > Documentation=https://docs.docker.com
I1229 06:53:24.217626 17440 command_runner.go:130] > After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
I1229 06:53:24.217631 17440 command_runner.go:130] > Wants=network-online.target containerd.service
I1229 06:53:24.217635 17440 command_runner.go:130] > Requires=docker.socket
I1229 06:53:24.217638 17440 command_runner.go:130] > StartLimitBurst=3
I1229 06:53:24.217642 17440 command_runner.go:130] > StartLimitIntervalSec=60
I1229 06:53:24.217646 17440 command_runner.go:130] > [Service]
I1229 06:53:24.217649 17440 command_runner.go:130] > Type=notify
I1229 06:53:24.217653 17440 command_runner.go:130] > Restart=always
I1229 06:53:24.217660 17440 command_runner.go:130] > ExecStart=
I1229 06:53:24.217694 17440 command_runner.go:130] > ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
I1229 06:53:24.217710 17440 command_runner.go:130] > ExecReload=/bin/kill -s HUP $MAINPID
I1229 06:53:24.217748 17440 command_runner.go:130] > # Having non-zero Limit*s causes performance problems due to accounting overhead
I1229 06:53:24.217761 17440 command_runner.go:130] > # in the kernel. We recommend using cgroups to do container-local accounting.
I1229 06:53:24.217767 17440 command_runner.go:130] > LimitNOFILE=infinity
I1229 06:53:24.217782 17440 command_runner.go:130] > LimitNPROC=infinity
I1229 06:53:24.217790 17440 command_runner.go:130] > LimitCORE=infinity
I1229 06:53:24.217818 17440 command_runner.go:130] > # Uncomment TasksMax if your systemd version supports it.
I1229 06:53:24.217828 17440 command_runner.go:130] > # Only systemd 226 and above support this version.
I1229 06:53:24.217833 17440 command_runner.go:130] > TasksMax=infinity
I1229 06:53:24.217840 17440 command_runner.go:130] > # set delegate yes so that systemd does not reset the cgroups of docker containers
I1229 06:53:24.217847 17440 command_runner.go:130] > Delegate=yes
I1229 06:53:24.217855 17440 command_runner.go:130] > # kill only the docker process, not all processes in the cgroup
I1229 06:53:24.217864 17440 command_runner.go:130] > KillMode=process
I1229 06:53:24.217871 17440 command_runner.go:130] > OOMScoreAdjust=-500
I1229 06:53:24.217881 17440 command_runner.go:130] > [Install]
I1229 06:53:24.217896 17440 command_runner.go:130] > WantedBy=multi-user.target
I1229 06:53:24.217973 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1229 06:53:24.255457 17440 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1229 06:53:24.293449 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1229 06:53:24.313141 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1229 06:53:24.332090 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1229 06:53:24.359168 17440 command_runner.go:130] > runtime-endpoint: unix:///var/run/cri-dockerd.sock
I1229 06:53:24.359453 17440 ssh_runner.go:195] Run: which cri-dockerd
I1229 06:53:24.364136 17440 command_runner.go:130] > /usr/bin/cri-dockerd
I1229 06:53:24.364255 17440 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1229 06:53:24.377342 17440 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1229 06:53:24.400807 17440 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1229 06:53:24.632265 17440 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1229 06:53:24.860401 17440 docker.go:578] configuring docker to use "systemd" as cgroup driver...
I1229 06:53:24.860544 17440 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I1229 06:53:24.885002 17440 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1229 06:53:24.902479 17440 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1229 06:53:25.138419 17440 ssh_runner.go:195] Run: sudo systemctl restart docker
I1229 06:53:48.075078 17440 ssh_runner.go:235] Completed: sudo systemctl restart docker: (22.936617903s)
I1229 06:53:48.075181 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1229 06:53:48.109404 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1229 06:53:48.160259 17440 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I1229 06:53:48.213352 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1229 06:53:48.231311 17440 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1229 06:53:48.408709 17440 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1229 06:53:48.584722 17440 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1229 06:53:48.754219 17440 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1229 06:53:48.798068 17440 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1229 06:53:48.815248 17440 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1229 06:53:48.983637 17440 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1229 06:53:49.117354 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1229 06:53:49.139900 17440 start.go:553] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1229 06:53:49.139985 17440 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1229 06:53:49.146868 17440 command_runner.go:130] > File: /var/run/cri-dockerd.sock
I1229 06:53:49.146900 17440 command_runner.go:130] > Size: 0 Blocks: 0 IO Block: 4096 socket
I1229 06:53:49.146910 17440 command_runner.go:130] > Device: 0,23 Inode: 2092 Links: 1
I1229 06:53:49.146918 17440 command_runner.go:130] > Access: (0660/srw-rw----) Uid: ( 0/ root) Gid: ( 1000/ docker)
I1229 06:53:49.146926 17440 command_runner.go:130] > Access: 2025-12-29 06:53:49.121969518 +0000
I1229 06:53:49.146933 17440 command_runner.go:130] > Modify: 2025-12-29 06:53:48.995956445 +0000
I1229 06:53:49.146940 17440 command_runner.go:130] > Change: 2025-12-29 06:53:49.012958222 +0000
I1229 06:53:49.146947 17440 command_runner.go:130] > Birth: 2025-12-29 06:53:48.995956445 +0000
I1229 06:53:49.146986 17440 start.go:574] Will wait 60s for crictl version
I1229 06:53:49.147040 17440 ssh_runner.go:195] Run: which crictl
I1229 06:53:49.152717 17440 command_runner.go:130] > /usr/bin/crictl
I1229 06:53:49.152823 17440 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1229 06:53:49.184154 17440 command_runner.go:130] > Version: 0.1.0
I1229 06:53:49.184179 17440 command_runner.go:130] > RuntimeName: docker
I1229 06:53:49.184183 17440 command_runner.go:130] > RuntimeVersion: 28.5.2
I1229 06:53:49.184188 17440 command_runner.go:130] > RuntimeApiVersion: v1
I1229 06:53:49.184211 17440 start.go:590] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.2
RuntimeApiVersion: v1
I1229 06:53:49.184266 17440 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1229 06:53:49.212414 17440 command_runner.go:130] > 28.5.2
I1229 06:53:49.213969 17440 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1229 06:53:49.257526 17440 command_runner.go:130] > 28.5.2
I1229 06:53:49.262261 17440 out.go:252] * Preparing Kubernetes v1.35.0 on Docker 28.5.2 ...
I1229 06:53:49.266577 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:49.267255 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:49.267298 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:49.267633 17440 ssh_runner.go:195] Run: grep 192.168.39.1 host.minikube.internal$ /etc/hosts
I1229 06:53:49.286547 17440 command_runner.go:130] > 192.168.39.1 host.minikube.internal
I1229 06:53:49.286686 17440 kubeadm.go:884] updating cluster {Name:functional-695625 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22353/minikube-v1.37.0-1766979747-22353-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766979815-22353@sha256:20dad5895b49b986a1253c0faab60865204843ac97fd3a6e6210da5896244409 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:functional-695625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.121 Port:8441 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false} ...
I1229 06:53:49.286896 17440 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I1229 06:53:49.286965 17440 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1229 06:53:49.324994 17440 command_runner.go:130] > registry.k8s.io/kube-apiserver:v1.35.0
I1229 06:53:49.325029 17440 command_runner.go:130] > registry.k8s.io/kube-controller-manager:v1.35.0
I1229 06:53:49.325037 17440 command_runner.go:130] > registry.k8s.io/kube-proxy:v1.35.0
I1229 06:53:49.325045 17440 command_runner.go:130] > registry.k8s.io/kube-scheduler:v1.35.0
I1229 06:53:49.325052 17440 command_runner.go:130] > registry.k8s.io/etcd:3.6.6-0
I1229 06:53:49.325060 17440 command_runner.go:130] > registry.k8s.io/coredns/coredns:v1.13.1
I1229 06:53:49.325067 17440 command_runner.go:130] > registry.k8s.io/pause:3.10.1
I1229 06:53:49.325074 17440 command_runner.go:130] > gcr.io/k8s-minikube/storage-provisioner:v5
I1229 06:53:49.325113 17440 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.35.0
registry.k8s.io/kube-controller-manager:v1.35.0
registry.k8s.io/kube-proxy:v1.35.0
registry.k8s.io/kube-scheduler:v1.35.0
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1229 06:53:49.325127 17440 docker.go:624] Images already preloaded, skipping extraction
I1229 06:53:49.325191 17440 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1229 06:53:49.352256 17440 command_runner.go:130] > registry.k8s.io/kube-apiserver:v1.35.0
I1229 06:53:49.352294 17440 command_runner.go:130] > registry.k8s.io/kube-proxy:v1.35.0
I1229 06:53:49.352301 17440 command_runner.go:130] > registry.k8s.io/kube-scheduler:v1.35.0
I1229 06:53:49.352309 17440 command_runner.go:130] > registry.k8s.io/kube-controller-manager:v1.35.0
I1229 06:53:49.352315 17440 command_runner.go:130] > registry.k8s.io/etcd:3.6.6-0
I1229 06:53:49.352323 17440 command_runner.go:130] > registry.k8s.io/coredns/coredns:v1.13.1
I1229 06:53:49.352349 17440 command_runner.go:130] > registry.k8s.io/pause:3.10.1
I1229 06:53:49.352361 17440 command_runner.go:130] > gcr.io/k8s-minikube/storage-provisioner:v5
I1229 06:53:49.352398 17440 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.35.0
registry.k8s.io/kube-proxy:v1.35.0
registry.k8s.io/kube-scheduler:v1.35.0
registry.k8s.io/kube-controller-manager:v1.35.0
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1229 06:53:49.352412 17440 cache_images.go:86] Images are preloaded, skipping loading
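The two "docker images --format {{.Repository}}:{{.Tag}}" listings above drive the preload decision: every image required for Kubernetes v1.35.0 is already present in the VM's Docker daemon, so the preload tarball is not extracted again. The standalone Go sketch below reproduces that kind of check by shelling out to the docker CLI; the required-image list is copied from the output above, and the function names are illustrative rather than minikube's actual API.

// preloadcheck.go: minimal sketch of the image-preload check seen in the log.
// It lists images the same way the log does and reports whether every
// required image is already present, in which case extraction can be skipped.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

var required = []string{
	"registry.k8s.io/kube-apiserver:v1.35.0",
	"registry.k8s.io/kube-controller-manager:v1.35.0",
	"registry.k8s.io/kube-proxy:v1.35.0",
	"registry.k8s.io/kube-scheduler:v1.35.0",
	"registry.k8s.io/etcd:3.6.6-0",
	"registry.k8s.io/coredns/coredns:v1.13.1",
	"registry.k8s.io/pause:3.10.1",
	"gcr.io/k8s-minikube/storage-provisioner:v5",
}

// imagesPreloaded returns true when every required image tag is already
// known to the local Docker daemon.
func imagesPreloaded() (bool, error) {
	out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
	if err != nil {
		return false, err
	}
	have := map[string]bool{}
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		have[strings.TrimSpace(line)] = true
	}
	for _, img := range required {
		if !have[img] {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	ok, err := imagesPreloaded()
	if err != nil {
		fmt.Println("docker images failed:", err)
		return
	}
	fmt.Println("images already preloaded:", ok)
}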
I1229 06:53:49.352427 17440 kubeadm.go:935] updating node { 192.168.39.121 8441 v1.35.0 docker true true} ...
I1229 06:53:49.352542 17440 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.35.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=functional-695625 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.39.121
[Install]
config:
{KubernetesVersion:v1.35.0 ClusterName:functional-695625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1229 06:53:49.352611 17440 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1229 06:53:49.466471 17440 command_runner.go:130] > systemd
I1229 06:53:49.469039 17440 cni.go:84] Creating CNI manager for ""
I1229 06:53:49.469084 17440 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1229 06:53:49.469108 17440 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1229 06:53:49.469137 17440 kubeadm.go:197] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.39.121 APIServerPort:8441 KubernetesVersion:v1.35.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-695625 NodeName:functional-695625 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.39.121"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.39.121 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1229 06:53:49.469275 17440 kubeadm.go:203] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.39.121
  bindPort: 8441
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "functional-695625"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.39.121"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.39.121"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8441
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.35.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1229 06:53:49.469338 17440 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.35.0
I1229 06:53:49.495545 17440 command_runner.go:130] > kubeadm
I1229 06:53:49.495573 17440 command_runner.go:130] > kubectl
I1229 06:53:49.495580 17440 command_runner.go:130] > kubelet
I1229 06:53:49.495602 17440 binaries.go:51] Found k8s binaries, skipping transfer
I1229 06:53:49.495647 17440 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1229 06:53:49.521658 17440 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (318 bytes)
I1229 06:53:49.572562 17440 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1229 06:53:49.658210 17440 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2223 bytes)
I1229 06:53:49.740756 17440 ssh_runner.go:195] Run: grep 192.168.39.121 control-plane.minikube.internal$ /etc/hosts
I1229 06:53:49.746333 17440 command_runner.go:130] > 192.168.39.121 control-plane.minikube.internal
I1229 06:53:49.746402 17440 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1229 06:53:50.073543 17440 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1229 06:53:50.148789 17440 certs.go:69] Setting up /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625 for IP: 192.168.39.121
I1229 06:53:50.148837 17440 certs.go:195] generating shared ca certs ...
I1229 06:53:50.148860 17440 certs.go:227] acquiring lock for ca certs: {Name:mke00d9bdd9ac6280bcf2843fe76ff41695d9199 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1229 06:53:50.149082 17440 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22353-9552/.minikube/ca.key
I1229 06:53:50.149152 17440 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22353-9552/.minikube/proxy-client-ca.key
I1229 06:53:50.149169 17440 certs.go:257] generating profile certs ...
I1229 06:53:50.149320 17440 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/client.key
I1229 06:53:50.149413 17440 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/apiserver.key.a4651613
I1229 06:53:50.149478 17440 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/proxy-client.key
I1229 06:53:50.149490 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I1229 06:53:50.149508 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I1229 06:53:50.149525 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I1229 06:53:50.149541 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I1229 06:53:50.149556 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I1229 06:53:50.149573 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I1229 06:53:50.149588 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I1229 06:53:50.149607 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I1229 06:53:50.149673 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/13486.pem (1338 bytes)
W1229 06:53:50.149723 17440 certs.go:480] ignoring /home/jenkins/minikube-integration/22353-9552/.minikube/certs/13486_empty.pem, impossibly tiny 0 bytes
I1229 06:53:50.149738 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca-key.pem (1679 bytes)
I1229 06:53:50.149776 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem (1082 bytes)
I1229 06:53:50.149837 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/cert.pem (1123 bytes)
I1229 06:53:50.149873 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/key.pem (1675 bytes)
I1229 06:53:50.149950 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem (1708 bytes)
I1229 06:53:50.150003 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:50.150023 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/13486.pem -> /usr/share/ca-certificates/13486.pem
I1229 06:53:50.150038 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem -> /usr/share/ca-certificates/134862.pem
I1229 06:53:50.150853 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1229 06:53:50.233999 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1229 06:53:50.308624 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1229 06:53:50.436538 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1229 06:53:50.523708 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1229 06:53:50.633239 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1229 06:53:50.746852 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1229 06:53:50.793885 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1229 06:53:50.894956 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1229 06:53:50.955149 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/certs/13486.pem --> /usr/share/ca-certificates/13486.pem (1338 bytes)
I1229 06:53:51.018694 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem --> /usr/share/ca-certificates/134862.pem (1708 bytes)
I1229 06:53:51.084938 17440 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (722 bytes)
I1229 06:53:51.127238 17440 ssh_runner.go:195] Run: openssl version
I1229 06:53:51.136812 17440 command_runner.go:130] > OpenSSL 3.4.1 11 Feb 2025 (Library: OpenSSL 3.4.1 11 Feb 2025)
I1229 06:53:51.136914 17440 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:51.154297 17440 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem
I1229 06:53:51.175503 17440 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:51.182560 17440 command_runner.go:130] > -rw-r--r-- 1 root root 1111 Dec 29 06:46 /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:51.182600 17440 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 29 06:46 /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:51.182653 17440 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:51.195355 17440 command_runner.go:130] > b5213941
I1229 06:53:51.195435 17440 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/b5213941.0
I1229 06:53:51.217334 17440 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/13486.pem
I1229 06:53:51.233542 17440 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/13486.pem /etc/ssl/certs/13486.pem
I1229 06:53:51.248778 17440 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13486.pem
I1229 06:53:51.255758 17440 command_runner.go:130] > -rw-r--r-- 1 root root 1338 Dec 29 06:52 /usr/share/ca-certificates/13486.pem
I1229 06:53:51.255826 17440 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Dec 29 06:52 /usr/share/ca-certificates/13486.pem
I1229 06:53:51.255874 17440 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13486.pem
I1229 06:53:51.272983 17440 command_runner.go:130] > 51391683
I1229 06:53:51.273077 17440 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/51391683.0
I1229 06:53:51.303911 17440 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/134862.pem
I1229 06:53:51.325828 17440 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/134862.pem /etc/ssl/certs/134862.pem
I1229 06:53:51.347788 17440 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/134862.pem
I1229 06:53:51.360429 17440 command_runner.go:130] > -rw-r--r-- 1 root root 1708 Dec 29 06:52 /usr/share/ca-certificates/134862.pem
I1229 06:53:51.360567 17440 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Dec 29 06:52 /usr/share/ca-certificates/134862.pem
I1229 06:53:51.360625 17440 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/134862.pem
I1229 06:53:51.369235 17440 command_runner.go:130] > 3ec20f2e
I1229 06:53:51.369334 17440 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/3ec20f2e.0
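Each of the three PEMs above (minikubeCA.pem, 13486.pem, 134862.pem) is installed the classic OpenSSL way: the certificate is hashed with "openssl x509 -hash -noout -in <pem>" and a /etc/ssl/certs/<hash>.0 symlink is pointed at the file, which is what the "sudo test -L /etc/ssl/certs/b5213941.0"-style probes verify. Below is a minimal Go sketch of those two steps, assuming a host openssl binary and root privileges; it is illustrative only and not minikube's certs.go code.

// cahash.go: sketch of the CA-install step shown above.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// installCA links /etc/ssl/certs/<subject-hash>.0 to pemPath, the layout
// that OpenSSL-based clients use to look up trusted CAs.
func installCA(pemPath string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pemPath).Output()
	if err != nil {
		return fmt.Errorf("hashing %s: %w", pemPath, err)
	}
	hash := strings.TrimSpace(string(out)) // e.g. "b5213941"
	link := filepath.Join("/etc/ssl/certs", hash+".0")
	_ = os.Remove(link) // replace a stale link if one exists
	return os.Symlink(pemPath, link)
}

func main() {
	if err := installCA("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
		fmt.Println(err)
	}
}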
I1229 06:53:51.381517 17440 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1229 06:53:51.387517 17440 command_runner.go:130] > File: /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1229 06:53:51.387548 17440 command_runner.go:130] > Size: 1176 Blocks: 8 IO Block: 4096 regular file
I1229 06:53:51.387554 17440 command_runner.go:130] > Device: 253,1 Inode: 1052441 Links: 1
I1229 06:53:51.387560 17440 command_runner.go:130] > Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
I1229 06:53:51.387568 17440 command_runner.go:130] > Access: 2025-12-29 06:52:32.673454347 +0000
I1229 06:53:51.387572 17440 command_runner.go:130] > Modify: 2025-12-29 06:52:32.673454347 +0000
I1229 06:53:51.387577 17440 command_runner.go:130] > Change: 2025-12-29 06:52:32.673454347 +0000
I1229 06:53:51.387581 17440 command_runner.go:130] > Birth: 2025-12-29 06:52:32.673454347 +0000
I1229 06:53:51.387657 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1229 06:53:51.396600 17440 command_runner.go:130] > Certificate will not expire
I1229 06:53:51.397131 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1229 06:53:51.410180 17440 command_runner.go:130] > Certificate will not expire
I1229 06:53:51.410283 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1229 06:53:51.419062 17440 command_runner.go:130] > Certificate will not expire
I1229 06:53:51.419164 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1229 06:53:51.431147 17440 command_runner.go:130] > Certificate will not expire
I1229 06:53:51.431222 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1229 06:53:51.441881 17440 command_runner.go:130] > Certificate will not expire
I1229 06:53:51.442104 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
I1229 06:53:51.450219 17440 command_runner.go:130] > Certificate will not expire
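The "-checkend 86400" probes above ask OpenSSL whether each control-plane certificate expires within the next 24 hours; exit status 0 produces the "Certificate will not expire" lines. The same check can be done natively, as in this sketch using Go's crypto/x509 (the path is taken from the log; nothing here is minikube's own code).

// certexpiry.go: native equivalent of "openssl x509 -checkend 86400".
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

// expiresWithin reports whether the first certificate in the PEM file
// expires within d (roughly openssl's -checkend semantics).
func expiresWithin(path string, d time.Duration) (bool, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	block, _ := pem.Decode(data)
	if block == nil {
		return false, fmt.Errorf("no PEM block in %s", path)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false, err
	}
	return time.Now().Add(d).After(cert.NotAfter), nil
}

func main() {
	soon, err := expiresWithin("/var/lib/minikube/certs/apiserver-kubelet-client.crt", 24*time.Hour)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("expires within 24h:", soon)
}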
I1229 06:53:51.450295 17440 kubeadm.go:401] StartCluster: {Name:functional-695625 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22353/minikube-v1.37.0-1766979747-22353-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766979815-22353@sha256:20dad5895b49b986a1253c0faab60865204843ac97fd3a6e6210da5896244409 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:functional-695625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.121 Port:8441 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1229 06:53:51.450396 17440 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1229 06:53:51.474716 17440 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1229 06:53:51.489086 17440 command_runner.go:130] > /var/lib/kubelet/config.yaml
I1229 06:53:51.489107 17440 command_runner.go:130] > /var/lib/kubelet/kubeadm-flags.env
I1229 06:53:51.489113 17440 command_runner.go:130] > /var/lib/minikube/etcd:
I1229 06:53:51.489117 17440 command_runner.go:130] > member
I1229 06:53:51.489676 17440 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1229 06:53:51.489694 17440 kubeadm.go:598] restartPrimaryControlPlane start ...
I1229 06:53:51.489753 17440 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1229 06:53:51.503388 17440 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1229 06:53:51.503948 17440 kubeconfig.go:125] found "functional-695625" server: "https://192.168.39.121:8441"
I1229 06:53:51.504341 17440 loader.go:405] Config loaded from file: /home/jenkins/minikube-integration/22353-9552/kubeconfig
I1229 06:53:51.504505 17440 kapi.go:59] client config for functional-695625: &rest.Config{Host:"https://192.168.39.121:8441", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/client.crt", KeyFile:"/home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/client.key", CAFile:"/home/jenkins/minikube-integration/22353-9552/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x2780200), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I1229 06:53:51.504963 17440 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I1229 06:53:51.504986 17440 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
I1229 06:53:51.504992 17440 envvar.go:172] "Feature gate default state" feature="InOrderInformersBatchProcess" enabled=true
I1229 06:53:51.504998 17440 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=true
I1229 06:53:51.505004 17440 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=true
I1229 06:53:51.505012 17440 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I1229 06:53:51.505089 17440 cert_rotation.go:141] "Starting client certificate rotation controller" logger="tls-transport-cache"
I1229 06:53:51.505414 17440 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1229 06:53:51.521999 17440 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.39.121
I1229 06:53:51.522047 17440 kubeadm.go:1161] stopping kube-system containers ...
I1229 06:53:51.522115 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1229 06:53:51.550376 17440 command_runner.go:130] > 6f69ba6a1553
I1229 06:53:51.550407 17440 command_runner.go:130] > a014f32abcd0
I1229 06:53:51.550415 17440 command_runner.go:130] > d81259f64136
I1229 06:53:51.550422 17440 command_runner.go:130] > fb6db97d8ffe
I1229 06:53:51.550432 17440 command_runner.go:130] > 17fe16a2822a
I1229 06:53:51.550441 17440 command_runner.go:130] > a79d99ad3fde
I1229 06:53:51.550448 17440 command_runner.go:130] > 4ed279733477
I1229 06:53:51.550455 17440 command_runner.go:130] > 1fc5fa7d9295
I1229 06:53:51.550462 17440 command_runner.go:130] > 98261fa185f6
I1229 06:53:51.550470 17440 command_runner.go:130] > b046056ff071
I1229 06:53:51.550478 17440 command_runner.go:130] > b3cc8048f6d9
I1229 06:53:51.550485 17440 command_runner.go:130] > bd96b57aa9fc
I1229 06:53:51.550491 17440 command_runner.go:130] > 64853b50a6c5
I1229 06:53:51.550496 17440 command_runner.go:130] > bd7d900efd48
I1229 06:53:51.550505 17440 command_runner.go:130] > 8911777281f4
I1229 06:53:51.550511 17440 command_runner.go:130] > a123d63a8edb
I1229 06:53:51.550516 17440 command_runner.go:130] > 548561c7ada8
I1229 06:53:51.550521 17440 command_runner.go:130] > fd22eb0d6c14
I1229 06:53:51.550528 17440 command_runner.go:130] > 14aafc386533
I1229 06:53:51.550540 17440 command_runner.go:130] > abbe46bd960e
I1229 06:53:51.550548 17440 command_runner.go:130] > 4b032678478a
I1229 06:53:51.550556 17440 command_runner.go:130] > 0af491ef7c2f
I1229 06:53:51.550566 17440 command_runner.go:130] > 5024b03252e3
I1229 06:53:51.550572 17440 command_runner.go:130] > fe7b5da2f7fb
I1229 06:53:51.550582 17440 command_runner.go:130] > ad82b94f7629
I1229 06:53:51.552420 17440 docker.go:487] Stopping containers: [6f69ba6a1553 a014f32abcd0 d81259f64136 fb6db97d8ffe 17fe16a2822a a79d99ad3fde 4ed279733477 1fc5fa7d9295 98261fa185f6 b046056ff071 b3cc8048f6d9 bd96b57aa9fc 64853b50a6c5 bd7d900efd48 8911777281f4 a123d63a8edb 548561c7ada8 fd22eb0d6c14 14aafc386533 abbe46bd960e 4b032678478a 0af491ef7c2f 5024b03252e3 fe7b5da2f7fb ad82b94f7629]
I1229 06:53:51.552499 17440 ssh_runner.go:195] Run: docker stop 6f69ba6a1553 a014f32abcd0 d81259f64136 fb6db97d8ffe 17fe16a2822a a79d99ad3fde 4ed279733477 1fc5fa7d9295 98261fa185f6 b046056ff071 b3cc8048f6d9 bd96b57aa9fc 64853b50a6c5 bd7d900efd48 8911777281f4 a123d63a8edb 548561c7ada8 fd22eb0d6c14 14aafc386533 abbe46bd960e 4b032678478a 0af491ef7c2f 5024b03252e3 fe7b5da2f7fb ad82b94f7629
I1229 06:53:51.976888 17440 command_runner.go:130] > 6f69ba6a1553
I1229 06:53:51.976911 17440 command_runner.go:130] > a014f32abcd0
I1229 06:53:58.789216 17440 command_runner.go:130] > d81259f64136
I1229 06:53:58.789240 17440 command_runner.go:130] > fb6db97d8ffe
I1229 06:53:58.789248 17440 command_runner.go:130] > 17fe16a2822a
I1229 06:53:58.789252 17440 command_runner.go:130] > a79d99ad3fde
I1229 06:53:58.789256 17440 command_runner.go:130] > 4ed279733477
I1229 06:53:58.789259 17440 command_runner.go:130] > 1fc5fa7d9295
I1229 06:53:58.789262 17440 command_runner.go:130] > 98261fa185f6
I1229 06:53:58.789266 17440 command_runner.go:130] > b046056ff071
I1229 06:53:58.789269 17440 command_runner.go:130] > b3cc8048f6d9
I1229 06:53:58.789272 17440 command_runner.go:130] > bd96b57aa9fc
I1229 06:53:58.789275 17440 command_runner.go:130] > 64853b50a6c5
I1229 06:53:58.789278 17440 command_runner.go:130] > bd7d900efd48
I1229 06:53:58.789281 17440 command_runner.go:130] > 8911777281f4
I1229 06:53:58.789284 17440 command_runner.go:130] > a123d63a8edb
I1229 06:53:58.789287 17440 command_runner.go:130] > 548561c7ada8
I1229 06:53:58.789295 17440 command_runner.go:130] > fd22eb0d6c14
I1229 06:53:58.789299 17440 command_runner.go:130] > 14aafc386533
I1229 06:53:58.789303 17440 command_runner.go:130] > abbe46bd960e
I1229 06:53:58.789306 17440 command_runner.go:130] > 4b032678478a
I1229 06:53:58.789310 17440 command_runner.go:130] > 0af491ef7c2f
I1229 06:53:58.789314 17440 command_runner.go:130] > 5024b03252e3
I1229 06:53:58.789317 17440 command_runner.go:130] > fe7b5da2f7fb
I1229 06:53:58.789321 17440 command_runner.go:130] > ad82b94f7629
I1229 06:53:58.790986 17440 ssh_runner.go:235] Completed: docker stop 6f69ba6a1553 a014f32abcd0 d81259f64136 fb6db97d8ffe 17fe16a2822a a79d99ad3fde 4ed279733477 1fc5fa7d9295 98261fa185f6 b046056ff071 b3cc8048f6d9 bd96b57aa9fc 64853b50a6c5 bd7d900efd48 8911777281f4 a123d63a8edb 548561c7ada8 fd22eb0d6c14 14aafc386533 abbe46bd960e 4b032678478a 0af491ef7c2f 5024b03252e3 fe7b5da2f7fb ad82b94f7629: (7.238443049s)
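Stopping the kube-system containers is a two-command step, as the log shows: list container IDs with the name filter k8s_.*_(kube-system)_, then pass them all to a single "docker stop", which is why that one invocation accounted for about 7.2 seconds across 25 containers. A self-contained sketch of the same two commands follows; it is illustrative only and not minikube's docker.go implementation.

// stopkube.go: sketch of the container-stop step above.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func stopKubeSystemContainers() error {
	// Same filter the log uses to find kube-system pod containers.
	out, err := exec.Command("docker", "ps", "-a",
		"--filter=name=k8s_.*_(kube-system)_", "--format={{.ID}}").Output()
	if err != nil {
		return err
	}
	ids := strings.Fields(string(out))
	if len(ids) == 0 {
		return nil // nothing to stop
	}
	// One "docker stop" with every ID, as in the log; Docker stops the
	// containers in turn, so the call can take several seconds.
	args := append([]string{"stop"}, ids...)
	return exec.Command("docker", args...).Run()
}

func main() {
	if err := stopKubeSystemContainers(); err != nil {
		fmt.Println("stopping kube-system containers:", err)
	}
}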
I1229 06:53:58.791057 17440 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1229 06:53:58.833953 17440 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1229 06:53:58.857522 17440 command_runner.go:130] > -rw------- 1 root root 5635 Dec 29 06:52 /etc/kubernetes/admin.conf
I1229 06:53:58.857550 17440 command_runner.go:130] > -rw------- 1 root root 5638 Dec 29 06:52 /etc/kubernetes/controller-manager.conf
I1229 06:53:58.857561 17440 command_runner.go:130] > -rw------- 1 root root 1974 Dec 29 06:52 /etc/kubernetes/kubelet.conf
I1229 06:53:58.857571 17440 command_runner.go:130] > -rw------- 1 root root 5590 Dec 29 06:52 /etc/kubernetes/scheduler.conf
I1229 06:53:58.857610 17440 kubeadm.go:158] found existing configuration files:
-rw------- 1 root root 5635 Dec 29 06:52 /etc/kubernetes/admin.conf
-rw------- 1 root root 5638 Dec 29 06:52 /etc/kubernetes/controller-manager.conf
-rw------- 1 root root 1974 Dec 29 06:52 /etc/kubernetes/kubelet.conf
-rw------- 1 root root 5590 Dec 29 06:52 /etc/kubernetes/scheduler.conf
I1229 06:53:58.857671 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I1229 06:53:58.875294 17440 command_runner.go:130] > server: https://control-plane.minikube.internal:8441
I1229 06:53:58.876565 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I1229 06:53:58.896533 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 1
stdout:
stderr:
I1229 06:53:58.896617 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1229 06:53:58.917540 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I1229 06:53:58.936703 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 1
stdout:
stderr:
I1229 06:53:58.936777 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1229 06:53:58.957032 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I1229 06:53:58.970678 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 1
stdout:
stderr:
I1229 06:53:58.970742 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1229 06:53:58.992773 17440 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1229 06:53:59.007767 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1229 06:53:59.061402 17440 command_runner.go:130] > [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1229 06:53:59.061485 17440 command_runner.go:130] > [certs] Using existing ca certificate authority
I1229 06:53:59.061525 17440 command_runner.go:130] > [certs] Using existing apiserver certificate and key on disk
I1229 06:53:59.061923 17440 command_runner.go:130] > [certs] Using existing apiserver-kubelet-client certificate and key on disk
I1229 06:53:59.062217 17440 command_runner.go:130] > [certs] Using existing front-proxy-ca certificate authority
I1229 06:53:59.062329 17440 command_runner.go:130] > [certs] Using existing front-proxy-client certificate and key on disk
I1229 06:53:59.062606 17440 command_runner.go:130] > [certs] Using existing etcd/ca certificate authority
I1229 06:53:59.062852 17440 command_runner.go:130] > [certs] Using existing etcd/server certificate and key on disk
I1229 06:53:59.062948 17440 command_runner.go:130] > [certs] Using existing etcd/peer certificate and key on disk
I1229 06:53:59.063179 17440 command_runner.go:130] > [certs] Using existing etcd/healthcheck-client certificate and key on disk
I1229 06:53:59.063370 17440 command_runner.go:130] > [certs] Using existing apiserver-etcd-client certificate and key on disk
I1229 06:53:59.063615 17440 command_runner.go:130] > [certs] Using the existing "sa" key
I1229 06:53:59.066703 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1229 06:53:59.686012 17440 command_runner.go:130] > [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1229 06:53:59.686050 17440 command_runner.go:130] > [kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
I1229 06:53:59.686059 17440 command_runner.go:130] > [kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/super-admin.conf"
I1229 06:53:59.686069 17440 command_runner.go:130] > [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1229 06:53:59.686078 17440 command_runner.go:130] > [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1229 06:53:59.686087 17440 command_runner.go:130] > [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1229 06:53:59.686203 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1229 06:53:59.995495 17440 command_runner.go:130] > [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1229 06:53:59.995529 17440 command_runner.go:130] > [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1229 06:53:59.995539 17440 command_runner.go:130] > [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1229 06:53:59.995545 17440 command_runner.go:130] > [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1229 06:53:59.995549 17440 command_runner.go:130] > [kubelet-start] Starting the kubelet
I1229 06:53:59.995615 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1229 06:54:00.047957 17440 command_runner.go:130] > [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1229 06:54:00.047983 17440 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-apiserver"
I1229 06:54:00.053966 17440 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1229 06:54:00.056537 17440 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-scheduler"
I1229 06:54:00.059558 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I1229 06:54:00.175745 17440 command_runner.go:130] > [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1229 06:54:00.175825 17440 api_server.go:52] waiting for apiserver process to appear ...
I1229 06:54:00.175893 17440 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1229 06:54:00.233895 17440 command_runner.go:130] > 2416
I1229 06:54:00.233940 17440 api_server.go:72] duration metric: took 58.126409ms to wait for apiserver process to appear ...
I1229 06:54:00.233953 17440 api_server.go:88] waiting for apiserver healthz status ...
I1229 06:54:00.233976 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:05.236821 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:05.236865 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:10.239922 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:10.239956 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:15.242312 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:15.242347 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:20.245667 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:20.245726 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:25.248449 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:25.248501 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:30.249241 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:30.249279 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:35.251737 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:35.251771 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:40.254366 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:40.254407 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:45.257232 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:45.257275 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:50.259644 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:50.259685 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:55.261558 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:55.261592 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:55:00.263123 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
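The block above is the apiserver readiness poll: each attempt GETs https://192.168.39.121:8441/healthz, gives up on that attempt after a short client timeout ("Client.Timeout exceeded while awaiting headers"), and retries until an overall deadline. A rough Go equivalent is sketched below; it skips TLS verification purely to stay self-contained, whereas minikube's own check presumably uses the cluster credentials.

// healthz.go: sketch of the apiserver readiness poll seen above.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func waitForHealthz(url string, deadline time.Duration) error {
	client := &http.Client{
		Timeout:   5 * time.Second, // roughly the per-attempt timeout visible in the log
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	stop := time.Now().Add(deadline)
	for time.Now().Before(stop) {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("apiserver healthz did not become ready within %s", deadline)
}

func main() {
	if err := waitForHealthz("https://192.168.39.121:8441/healthz", 4*time.Minute); err != nil {
		fmt.Println(err)
	}
}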
I1229 06:55:00.263241 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
I1229 06:55:00.287429 17440 command_runner.go:130] > fb6db97d8ffe
I1229 06:55:00.288145 17440 logs.go:282] 1 containers: [fb6db97d8ffe]
I1229 06:55:00.288289 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
I1229 06:55:00.310519 17440 command_runner.go:130] > d81259f64136
I1229 06:55:00.310561 17440 logs.go:282] 1 containers: [d81259f64136]
I1229 06:55:00.310630 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
I1229 06:55:00.334579 17440 command_runner.go:130] > 6f69ba6a1553
I1229 06:55:00.334624 17440 logs.go:282] 1 containers: [6f69ba6a1553]
I1229 06:55:00.334692 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
I1229 06:55:00.353472 17440 command_runner.go:130] > 4d49952084c9
I1229 06:55:00.353503 17440 command_runner.go:130] > a79d99ad3fde
I1229 06:55:00.354626 17440 logs.go:282] 2 containers: [4d49952084c9 a79d99ad3fde]
I1229 06:55:00.354714 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
I1229 06:55:00.376699 17440 command_runner.go:130] > 8911777281f4
I1229 06:55:00.378105 17440 logs.go:282] 1 containers: [8911777281f4]
I1229 06:55:00.378188 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
I1229 06:55:00.397976 17440 command_runner.go:130] > 17fe16a2822a
I1229 06:55:00.399617 17440 logs.go:282] 1 containers: [17fe16a2822a]
I1229 06:55:00.399707 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
I1229 06:55:00.419591 17440 logs.go:282] 0 containers: []
W1229 06:55:00.419617 17440 logs.go:284] No container was found matching "kindnet"
I1229 06:55:00.419665 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
I1229 06:55:00.440784 17440 command_runner.go:130] > bd96b57aa9fc
I1229 06:55:00.441985 17440 logs.go:282] 1 containers: [bd96b57aa9fc]
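With the apiserver still not answering healthz, the log-gathering phase begins: for each component, container IDs are resolved with a "docker ps -a --filter=name=k8s_<component>" call (as above) and the last 400 log lines of each container are tailed (the "docker logs --tail 400 <id>" runs that follow). The sketch below mirrors that pattern under the assumption of plain docker CLI access; the function names are illustrative, not minikube's logs.go API.

// gatherlogs.go: sketch of the per-component diagnostics gathering.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// containerIDs resolves containers for a component such as "kube-apiserver".
func containerIDs(component string) ([]string, error) {
	out, err := exec.Command("docker", "ps", "-a",
		"--filter=name=k8s_"+component, "--format={{.ID}}").Output()
	if err != nil {
		return nil, err
	}
	return strings.Fields(string(out)), nil
}

// tailLogs returns the last 400 log lines of a container, stdout and stderr combined.
func tailLogs(id string) (string, error) {
	out, err := exec.Command("docker", "logs", "--tail", "400", id).CombinedOutput()
	return string(out), err
}

func main() {
	ids, err := containerIDs("kube-apiserver")
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, id := range ids {
		logs, _ := tailLogs(id)
		fmt.Printf("==> %s <==\n%s\n", id, logs)
	}
}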
I1229 06:55:00.442020 17440 logs.go:123] Gathering logs for kube-controller-manager [17fe16a2822a] ...
I1229 06:55:00.442030 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 17fe16a2822a"
I1229 06:55:00.465151 17440 command_runner.go:130] ! I1229 06:53:51.283329 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:55:00.465192 17440 command_runner.go:130] ! I1229 06:53:51.303666 1 controllermanager.go:189] "Starting" version="v1.35.0"
I1229 06:55:00.465226 17440 command_runner.go:130] ! I1229 06:53:51.303706 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:55:00.465237 17440 command_runner.go:130] ! I1229 06:53:51.307865 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1229 06:55:00.465255 17440 command_runner.go:130] ! I1229 06:53:51.308287 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:55:00.465271 17440 command_runner.go:130] ! I1229 06:53:51.309479 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1229 06:55:00.465285 17440 command_runner.go:130] ! I1229 06:53:51.309545 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:55:00.465823 17440 logs.go:123] Gathering logs for coredns [6f69ba6a1553] ...
I1229 06:55:00.465845 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 6f69ba6a1553"
I1229 06:55:00.487618 17440 logs.go:123] Gathering logs for kube-scheduler [4d49952084c9] ...
I1229 06:55:00.487646 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 4d49952084c9"
I1229 06:55:00.508432 17440 command_runner.go:130] ! I1229 06:53:52.882050 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:55:00.508468 17440 command_runner.go:130] ! W1229 06:54:52.896472 1 authentication.go:397] Error looking up in-cluster authentication configuration: the server was unable to return a response in the time allotted, but may still be processing the request (get configmaps extension-apiserver-authentication)
I1229 06:55:00.508482 17440 command_runner.go:130] ! W1229 06:54:52.896499 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
I1229 06:55:00.508508 17440 command_runner.go:130] ! W1229 06:54:52.896506 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1229 06:55:00.508521 17440 command_runner.go:130] ! I1229 06:54:52.913597 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.35.0"
I1229 06:55:00.508529 17440 command_runner.go:130] ! I1229 06:54:52.913622 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:55:00.508541 17440 command_runner.go:130] ! I1229 06:54:52.915784 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1229 06:55:00.508551 17440 command_runner.go:130] ! I1229 06:54:52.915816 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:55:00.508560 17440 command_runner.go:130] ! I1229 06:54:52.915823 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1229 06:55:00.508568 17440 command_runner.go:130] ! I1229 06:54:52.915940 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:55:00.510308 17440 logs.go:123] Gathering logs for kube-scheduler [a79d99ad3fde] ...
I1229 06:55:00.510337 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a79d99ad3fde"
I1229 06:55:00.531862 17440 command_runner.go:130] ! I1229 06:53:51.269699 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:55:00.532900 17440 logs.go:123] Gathering logs for kube-proxy [8911777281f4] ...
I1229 06:55:00.532924 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 8911777281f4"
I1229 06:55:00.554051 17440 command_runner.go:130] ! I1229 06:52:47.703648 1 server_linux.go:53] "Using iptables proxy"
I1229 06:55:00.554084 17440 command_runner.go:130] ! I1229 06:52:47.791676 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:55:00.554095 17440 command_runner.go:130] ! I1229 06:52:47.897173 1 shared_informer.go:377] "Caches are synced"
I1229 06:55:00.554109 17440 command_runner.go:130] ! I1229 06:52:47.900073 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.39.121"]
I1229 06:55:00.554131 17440 command_runner.go:130] ! E1229 06:52:47.906310 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1229 06:55:00.554148 17440 command_runner.go:130] ! I1229 06:52:48.206121 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
I1229 06:55:00.554170 17440 command_runner.go:130] ! error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
I1229 06:55:00.554189 17440 command_runner.go:130] ! Perhaps ip6tables or your kernel needs to be upgraded.
I1229 06:55:00.554195 17440 command_runner.go:130] ! >
I1229 06:55:00.554208 17440 command_runner.go:130] ! I1229 06:52:48.209509 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1229 06:55:00.554224 17440 command_runner.go:130] ! I1229 06:52:48.210145 1 server_linux.go:136] "Using iptables Proxier"
I1229 06:55:00.554250 17440 command_runner.go:130] ! I1229 06:52:48.253805 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1229 06:55:00.554261 17440 command_runner.go:130] ! I1229 06:52:48.255046 1 server.go:529] "Version info" version="v1.35.0"
I1229 06:55:00.554273 17440 command_runner.go:130] ! I1229 06:52:48.255076 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:55:00.554316 17440 command_runner.go:130] ! I1229 06:52:48.262205 1 config.go:200] "Starting service config controller"
I1229 06:55:00.554327 17440 command_runner.go:130] ! I1229 06:52:48.262238 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1229 06:55:00.554339 17440 command_runner.go:130] ! I1229 06:52:48.262258 1 config.go:106] "Starting endpoint slice config controller"
I1229 06:55:00.554350 17440 command_runner.go:130] ! I1229 06:52:48.262261 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1229 06:55:00.554366 17440 command_runner.go:130] ! I1229 06:52:48.262278 1 config.go:403] "Starting serviceCIDR config controller"
I1229 06:55:00.554381 17440 command_runner.go:130] ! I1229 06:52:48.262282 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1229 06:55:00.554390 17440 command_runner.go:130] ! I1229 06:52:48.270608 1 config.go:309] "Starting node config controller"
I1229 06:55:00.554402 17440 command_runner.go:130] ! I1229 06:52:48.271311 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1229 06:55:00.554414 17440 command_runner.go:130] ! I1229 06:52:48.271337 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1229 06:55:00.554427 17440 command_runner.go:130] ! I1229 06:52:48.363324 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1229 06:55:00.554437 17440 command_runner.go:130] ! I1229 06:52:48.363427 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1229 06:55:00.554452 17440 command_runner.go:130] ! I1229 06:52:48.363671 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1229 06:55:00.556555 17440 logs.go:123] Gathering logs for storage-provisioner [bd96b57aa9fc] ...
I1229 06:55:00.556578 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 bd96b57aa9fc"
I1229 06:55:00.581812 17440 command_runner.go:130] ! I1229 06:52:48.539098 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1229 06:55:00.581848 17440 command_runner.go:130] ! I1229 06:52:48.550309 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1229 06:55:00.581857 17440 command_runner.go:130] ! I1229 06:52:48.550373 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1229 06:55:00.581865 17440 command_runner.go:130] ! W1229 06:52:48.552935 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581874 17440 command_runner.go:130] ! W1229 06:52:48.563735 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581881 17440 command_runner.go:130] ! I1229 06:52:48.564362 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1229 06:55:00.581890 17440 command_runner.go:130] ! I1229 06:52:48.565422 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:55:00.581911 17440 command_runner.go:130] ! I1229 06:52:48.565143 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"cfb65e16-c2f7-4c19-a059-8ef64f8f3f2e", APIVersion:"v1", ResourceVersion:"388", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868 became leader
I1229 06:55:00.581919 17440 command_runner.go:130] ! W1229 06:52:48.576668 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581930 17440 command_runner.go:130] ! W1229 06:52:48.582743 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581942 17440 command_runner.go:130] ! I1229 06:52:48.665711 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:55:00.581949 17440 command_runner.go:130] ! W1229 06:52:50.588204 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581957 17440 command_runner.go:130] ! W1229 06:52:50.593815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581964 17440 command_runner.go:130] ! W1229 06:52:52.597431 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581975 17440 command_runner.go:130] ! W1229 06:52:52.602815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581985 17440 command_runner.go:130] ! W1229 06:52:54.606663 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581993 17440 command_runner.go:130] ! W1229 06:52:54.612650 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582003 17440 command_runner.go:130] ! W1229 06:52:56.616395 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582010 17440 command_runner.go:130] ! W1229 06:52:56.622404 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582020 17440 command_runner.go:130] ! W1229 06:52:58.626804 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582030 17440 command_runner.go:130] ! W1229 06:52:58.637257 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582037 17440 command_runner.go:130] ! W1229 06:53:00.640728 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582044 17440 command_runner.go:130] ! W1229 06:53:00.646446 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582051 17440 command_runner.go:130] ! W1229 06:53:02.650659 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582070 17440 command_runner.go:130] ! W1229 06:53:02.660374 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582080 17440 command_runner.go:130] ! W1229 06:53:04.664091 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582088 17440 command_runner.go:130] ! W1229 06:53:04.669806 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582097 17440 command_runner.go:130] ! W1229 06:53:06.674203 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582105 17440 command_runner.go:130] ! W1229 06:53:06.680002 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582115 17440 command_runner.go:130] ! W1229 06:53:08.683483 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582125 17440 command_runner.go:130] ! W1229 06:53:08.688934 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582141 17440 command_runner.go:130] ! W1229 06:53:10.693644 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582152 17440 command_runner.go:130] ! W1229 06:53:10.706122 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582160 17440 command_runner.go:130] ! W1229 06:53:12.709949 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582170 17440 command_runner.go:130] ! W1229 06:53:12.715753 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582177 17440 command_runner.go:130] ! W1229 06:53:14.719191 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582186 17440 command_runner.go:130] ! W1229 06:53:14.728100 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582193 17440 command_runner.go:130] ! W1229 06:53:16.731658 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582203 17440 command_runner.go:130] ! W1229 06:53:16.737463 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582211 17440 command_runner.go:130] ! W1229 06:53:18.741304 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582221 17440 command_runner.go:130] ! W1229 06:53:18.746708 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582228 17440 command_runner.go:130] ! W1229 06:53:20.749662 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582235 17440 command_runner.go:130] ! W1229 06:53:20.755989 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582242 17440 command_runner.go:130] ! W1229 06:53:22.760815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582252 17440 command_runner.go:130] ! W1229 06:53:22.772421 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582261 17440 command_runner.go:130] ! W1229 06:53:24.776403 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582269 17440 command_runner.go:130] ! W1229 06:53:24.783232 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582276 17440 command_runner.go:130] ! W1229 06:53:26.786665 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582287 17440 command_runner.go:130] ! W1229 06:53:26.792239 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582294 17440 command_runner.go:130] ! W1229 06:53:28.796420 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582302 17440 command_runner.go:130] ! W1229 06:53:28.805511 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582312 17440 command_runner.go:130] ! W1229 06:53:30.808544 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582319 17440 command_runner.go:130] ! W1229 06:53:30.816066 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582329 17440 command_runner.go:130] ! W1229 06:53:32.820090 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582336 17440 command_runner.go:130] ! W1229 06:53:32.826208 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582346 17440 command_runner.go:130] ! W1229 06:53:34.829865 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582353 17440 command_runner.go:130] ! W1229 06:53:34.835774 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582363 17440 command_runner.go:130] ! W1229 06:53:36.839291 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582370 17440 command_runner.go:130] ! W1229 06:53:36.853251 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582378 17440 command_runner.go:130] ! W1229 06:53:38.856432 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582385 17440 command_runner.go:130] ! W1229 06:53:38.862360 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
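The warnings gathered above appear to come from the minikube-hostpath provisioner container (its "Started provisioner controller" line is interleaved with them): it still watches core/v1 Endpoints, which the API server reports as deprecated in v1.33+ in favor of discovery.k8s.io/v1 EndpointSlice. As a hedged illustration only (not part of the test output, and not minikube's own code), a minimal client-go sketch that reads EndpointSlices instead of the deprecated Endpoints might look like this; the kubeconfig path and namespace are assumptions.

```go
// Editor's sketch: list discovery.k8s.io/v1 EndpointSlices, the replacement the
// warnings above point to, instead of the deprecated core/v1 Endpoints.
// Assumes a reachable cluster via the default kubeconfig (~/.kube/config).
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// EndpointSlices carry the same backend addresses the old Endpoints watch provided.
	slices, err := cs.DiscoveryV1().EndpointSlices("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range slices.Items {
		fmt.Println(s.Name, len(s.Endpoints))
	}
}
```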
I1229 06:55:00.586872 17440 logs.go:123] Gathering logs for Docker ...
I1229 06:55:00.586916 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I1229 06:55:00.609702 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:55:00.609731 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:55:00.609766 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:55:00.609784 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:55:00.609811 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=fatal msg="failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.609822 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Main process exited, code=exited, status=1/FAILURE
I1229 06:55:00.609831 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Failed with result 'exit-code'.
I1229 06:55:00.609842 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Failed to start CRI Interface for Docker Application Container Engine.
I1229 06:55:00.609848 17440 command_runner.go:130] > Dec 29 06:52:22 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:55:00.609857 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.020462163Z" level=info msg="Starting up"
I1229 06:55:00.609865 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.027928346Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:55:00.609879 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028129610Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:55:00.609890 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028144703Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:55:00.609906 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.043277940Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:55:00.609915 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.068992169Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:55:00.609923 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.093451498Z" level=info msg="Loading containers: start."
I1229 06:55:00.609943 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.245820420Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:55:00.609954 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.354124488Z" level=info msg="Loading containers: done."
I1229 06:55:00.609966 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.369556904Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:55:00.609976 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.370022229Z" level=info msg="Initializing buildkit"
I1229 06:55:00.609983 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: cri-docker.service: Scheduled restart job, restart counter is at 1.
I1229 06:55:00.609990 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:55:00.609998 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.429481151Z" level=info msg="Completed buildkit initialization"
I1229 06:55:00.610006 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437135480Z" level=info msg="Daemon has completed initialization"
I1229 06:55:00.610016 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437228150Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:55:00.610024 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437499736Z" level=info msg="API listen on /run/docker.sock"
I1229 06:55:00.610041 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437545942Z" level=info msg="API listen on [::]:2376"
I1229 06:55:00.610050 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:55:00.610070 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:55:00.610082 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:55:00.610091 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:55:00.610100 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:55:00.610107 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Hairpin mode is set to none"
I1229 06:55:00.610115 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Loaded network plugin cni"
I1229 06:55:00.610123 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:55:00.610131 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:55:00.610141 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:55:00.610152 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:55:00.610159 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:55:00.610168 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:55:00.610179 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250034276Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:55:00.610191 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250065025Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:55:00.610203 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250432086Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:55:00.610216 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250448972Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:55:00.610223 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:55:00.610231 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.004793725Z" level=info msg="Processing signal 'terminated'"
I1229 06:55:00.610242 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006609373Z" level=warning msg="Error while testing if containerd API is ready" error="Canceled: grpc: the client connection is closing"
I1229 06:55:00.610251 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006865498Z" level=info msg="Daemon shutdown complete"
I1229 06:55:00.610258 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:55:00.610265 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:55:00.610271 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:55:00.610281 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.072059214Z" level=info msg="Starting up"
I1229 06:55:00.610290 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079212056Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:55:00.610303 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079317481Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:55:00.610323 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079333267Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:55:00.610335 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.100712562Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:55:00.610345 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.111060819Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:55:00.610355 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.125644752Z" level=info msg="Loading containers: start."
I1229 06:55:00.610374 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.269806698Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:55:00.610384 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.403684326Z" level=info msg="Loading containers: done."
I1229 06:55:00.610394 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419740189Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:55:00.610404 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419840379Z" level=info msg="Initializing buildkit"
I1229 06:55:00.610412 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.440865810Z" level=info msg="Completed buildkit initialization"
I1229 06:55:00.610422 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.450796825Z" level=info msg="Daemon has completed initialization"
I1229 06:55:00.610429 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451233366Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:55:00.610439 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451367379Z" level=info msg="API listen on /run/docker.sock"
I1229 06:55:00.610447 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451393479Z" level=info msg="API listen on [::]:2376"
I1229 06:55:00.610455 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:55:00.610461 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:55:00.610470 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:55:00.610476 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:55:00.610483 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:55:00.610491 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:55:00.610500 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:55:00.610508 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:55:00.610516 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:55:00.610523 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Loaded network plugin cni"
I1229 06:55:00.610531 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:55:00.610538 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:55:00.610550 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:55:00.610559 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:55:00.610567 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:55:00.610573 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:55:00.610579 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:55:00.610595 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.014018901Z" level=info msg="Processing signal 'terminated'"
I1229 06:55:00.610607 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.015980570Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
I1229 06:55:00.610615 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.016658114Z" level=info msg="Daemon shutdown complete"
I1229 06:55:00.610622 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:55:00.610630 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:55:00.610637 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:55:00.610644 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.080172805Z" level=info msg="Starting up"
I1229 06:55:00.610653 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087153730Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:55:00.610669 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087606870Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:55:00.610680 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087791007Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:55:00.610692 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.102104328Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:55:00.610705 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.438808405Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:55:00.610713 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.712758412Z" level=info msg="Loading containers: start."
I1229 06:55:00.610735 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.850108278Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:55:00.610744 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.981771558Z" level=info msg="Loading containers: done."
I1229 06:55:00.610755 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997281457Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:55:00.610765 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997336373Z" level=info msg="Initializing buildkit"
I1229 06:55:00.610772 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.018270012Z" level=info msg="Completed buildkit initialization"
I1229 06:55:00.610781 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.027948102Z" level=info msg="Daemon has completed initialization"
I1229 06:55:00.610789 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028167710Z" level=info msg="API listen on /run/docker.sock"
I1229 06:55:00.610809 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028236879Z" level=info msg="API listen on [::]:2376"
I1229 06:55:00.610818 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028260561Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:55:00.610824 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:55:00.610853 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610867 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610881 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610896 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610909 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:44Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
I1229 06:55:00.610922 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:46Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610936 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610949 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610964 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610979 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.687270343Z" level=info msg="ignoring event" container=67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.610995 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.834054505Z" level=info msg="ignoring event" container=82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611010 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154228197Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:55:00.611021 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154272599Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:55:00.611037 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154382560Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:55:00.611048 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154394909Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:55:00.611062 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:55:00.611070 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 dockerd[1647]: time="2025-12-29T06:53:25.157393741Z" level=info msg="Processing signal 'terminated'"
I1229 06:55:00.611079 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.159560262Z" level=error msg="Force shutdown daemon"
I1229 06:55:00.611087 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.160035445Z" level=info msg="Daemon shutdown complete"
I1229 06:55:00.611096 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:55:00.611102 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:55:00.611109 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Consumed 2.138s CPU time, 29.7M memory peak.
I1229 06:55:00.611118 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:55:00.611125 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.286623538Z" level=info msg="Starting up"
I1229 06:55:00.611135 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295291170Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:55:00.611146 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295480841Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:55:00.611157 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295496671Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:55:00.611167 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.316635284Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:55:00.611179 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.328807793Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:55:00.611186 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.355375449Z" level=info msg="Loading containers: start."
I1229 06:55:00.611199 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.681285713Z" level=info msg="ignoring event" container=5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611213 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.755492465Z" level=info msg="ignoring event" container=bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611226 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.762530714Z" level=info msg="ignoring event" container=64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611241 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.775670003Z" level=info msg="ignoring event" container=0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611266 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.794654459Z" level=info msg="ignoring event" container=8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611281 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.801655844Z" level=info msg="ignoring event" container=548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611295 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828715029Z" level=info msg="ignoring event" container=ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611310 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828769092Z" level=info msg="ignoring event" container=a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611325 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.845767487Z" level=info msg="ignoring event" container=abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611342 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.864343925Z" level=info msg="ignoring event" container=fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611355 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.865774071Z" level=info msg="ignoring event" container=14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611370 17440 command_runner.go:130] > Dec 29 06:53:45 functional-695625 dockerd[4014]: time="2025-12-29T06:53:45.656598076Z" level=info msg="ignoring event" container=bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611382 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.636734672Z" level=info msg="ignoring event" container=fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611404 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.811417108Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:55:00.611417 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.854503584Z" level=info msg="Removing stale sandbox" cid=a123d63a8edb isRestore=false sid=bee98e10184c
I1229 06:55:00.611435 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.857444846Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 155f23c2cd353f99747cbbed5071c374427d34acfe358ab2da9489f0ecc6dd58 20989221f5da3e18159e9875a44d6ffa354887adacc49a282cdee70b58f0dd06], retrying...."
I1229 06:55:00.611449 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.873316567Z" level=info msg="Removing stale sandbox" cid=0af491ef7c2f isRestore=false sid=043bbf7592a3
I1229 06:55:00.611464 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.875334227Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 abd499ef79857402bb9465b07e26fb0f75693045ea6a45283c4a1a4b13da7c92], retrying...."
I1229 06:55:00.611476 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.887452986Z" level=info msg="Removing stale sandbox" cid=ad82b94f7629 isRestore=false sid=4ae81a2c92d8
I1229 06:55:00.611491 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.890633879Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 ccabc3ec6c0d337909f3a6bfccd1999d5ddec500f785c46c7c1173bb9f142a4d], retrying...."
I1229 06:55:00.611502 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.896180450Z" level=info msg="Removing stale sandbox" cid=5024b03252e3 isRestore=false sid=4f7be10df8fc
I1229 06:55:00.611517 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.898438145Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 0e272d35a10e432b889f2a3f6f048225031acf42b0444ba6b0cc9339f3cb374f], retrying...."
I1229 06:55:00.611529 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.903187461Z" level=info msg="Removing stale sandbox" cid=64853b50a6c5 isRestore=false sid=826a3dc204ef
I1229 06:55:00.611544 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.905271147Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 67cd3d4378e987242bd34247eace592097542682b6c3f23a5a478422e9bfbb3b], retrying...."
I1229 06:55:00.611558 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.910152629Z" level=info msg="Removing stale sandbox" cid=548561c7ada8 isRestore=false sid=94281ce70a77
I1229 06:55:00.611574 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.911967707Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 b513626d489ab85e12802c06e57f2ac0b0298434467c73d2846152ca9481eeae], retrying...."
I1229 06:55:00.611586 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.917235829Z" level=info msg="Removing stale sandbox" cid=fe7b5da2f7fb isRestore=false sid=b5e6c523a381
I1229 06:55:00.611601 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.919265802Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 bef3c0f56e910ab0a1a698f2eb08c97229abee2b90bf53ab9119cbdba3cb6eaa], retrying...."
I1229 06:55:00.611617 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022090385Z" level=warning msg="error locating sandbox id 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3: sandbox 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3 not found"
I1229 06:55:00.611631 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022147638Z" level=warning msg="error locating sandbox id 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48: sandbox 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48 not found"
I1229 06:55:00.611645 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022162233Z" level=warning msg="error locating sandbox id 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6: sandbox 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6 not found"
I1229 06:55:00.611660 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022177741Z" level=warning msg="error locating sandbox id 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e: sandbox 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e not found"
I1229 06:55:00.611674 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022193375Z" level=warning msg="error locating sandbox id bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc: sandbox bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc not found"
I1229 06:55:00.611689 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022209936Z" level=warning msg="error locating sandbox id 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2: sandbox 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2 not found"
I1229 06:55:00.611702 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022222477Z" level=warning msg="error locating sandbox id b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0: sandbox b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0 not found"
I1229 06:55:00.611712 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022440032Z" level=info msg="Loading containers: done."
I1229 06:55:00.611722 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037242165Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:55:00.611732 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037335060Z" level=info msg="Initializing buildkit"
I1229 06:55:00.611740 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.057350643Z" level=info msg="Completed buildkit initialization"
I1229 06:55:00.611751 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.066932687Z" level=info msg="Daemon has completed initialization"
I1229 06:55:00.611759 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067089967Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:55:00.611767 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067190842Z" level=info msg="API listen on /run/docker.sock"
I1229 06:55:00.611835 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067284257Z" level=info msg="API listen on [::]:2376"
I1229 06:55:00.611849 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:55:00.611867 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:53:48Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780\""
I1229 06:55:00.611877 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:55:00.611888 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:55:00.611894 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:55:00.611901 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Consumed 1.284s CPU time, 18.5M memory peak.
I1229 06:55:00.611909 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:55:00.611917 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:55:00.611929 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:55:00.611937 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:55:00.611946 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:55:00.611954 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Loaded network plugin cni"
I1229 06:55:00.611963 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:55:00.611971 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:55:00.611981 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:55:00.611990 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:55:00.611999 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:55:00.612006 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:55:00.612019 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612031 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612046 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612063 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612079 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 dockerd[4014]: time="2025-12-29T06:53:50.654005689Z" level=info msg="ignoring event" container=fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612093 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612112 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": unexpected command output nsenter: cannot open /proc/5603/ns/net: No such file or directory\n with error: exit status 1"
I1229 06:55:00.612128 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.781948864Z" level=info msg="ignoring event" container=17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612142 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.786486841Z" level=info msg="ignoring event" container=1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612157 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.807329963Z" level=info msg="ignoring event" container=b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612171 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.833907949Z" level=info msg="ignoring event" container=6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612185 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.842344727Z" level=info msg="ignoring event" container=a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612201 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.846952655Z" level=info msg="ignoring event" container=4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612217 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.855675748Z" level=info msg="ignoring event" container=98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612230 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 dockerd[4014]: time="2025-12-29T06:53:52.089998903Z" level=info msg="ignoring event" container=a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612245 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a96e34d38f8c1eccbbdf73d99dbbbe353acea505d84b69f0fdd4e54cb811123/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612259 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d3819cc8ab802e5145e47325398f1da69b88a241482842040339b6b0d609a176/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612274 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fefef7c5591ea14974a99c19d99f86c4404e25de1b446a0cd0f0bcfffa63a991/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612293 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784\""
I1229 06:55:00.612309 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a7b1e961ded554edec9d882d7f1f6093e8446ab1020c81b638de16b76de139b0/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612323 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.150956960Z" level=error msg="collecting stats for container /k8s_etcd_etcd-functional-695625_kube-system_8da5c6c8980da2ca920a502b6f312384_1: invalid id: id is empty"
I1229 06:55:00.612338 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: 2025/12/29 06:53:58 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request.(*RespWriterWrapper).writeHeader (resp_writer_wrapper.go:83)
I1229 06:55:00.612354 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.741840545Z" level=info msg="ignoring event" container=d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612366 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:59Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/173054afc2f39262ebb1466d26d5d6144bb8704054c087da601130a01d9caaf1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612380 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: W1229 06:53:59.025412 4884 logging.go:59] [core] [Server #1] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
I1229 06:55:00.612394 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:55:00.612407 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:55:00.629261 17440 logs.go:123] Gathering logs for container status ...
I1229 06:55:00.629293 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1229 06:55:00.671242 17440 command_runner.go:130] > CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
I1229 06:55:00.671279 17440 command_runner.go:130] > 4d49952084c92 550794e3b12ac About a minute ago Running kube-scheduler 2 fefef7c5591ea kube-scheduler-functional-695625 kube-system
I1229 06:55:00.671293 17440 command_runner.go:130] > 6f69ba6a1553a aa5e3ebc0dfed About a minute ago Exited coredns 1 a014f32abcd01 coredns-7d764666f9-wfq7m kube-system
I1229 06:55:00.671303 17440 command_runner.go:130] > d81259f64136c 0a108f7189562 About a minute ago Exited etcd 1 1fc5fa7d92959 etcd-functional-695625 kube-system
I1229 06:55:00.671315 17440 command_runner.go:130] > fb6db97d8ffe4 5c6acd67e9cd1 About a minute ago Exited kube-apiserver 1 4ed2797334771 kube-apiserver-functional-695625 kube-system
I1229 06:55:00.671327 17440 command_runner.go:130] > 17fe16a2822a8 2c9a4b058bd7e About a minute ago Exited kube-controller-manager 1 98261fa185f6e kube-controller-manager-functional-695625 kube-system
I1229 06:55:00.671337 17440 command_runner.go:130] > a79d99ad3fde3 550794e3b12ac About a minute ago Exited kube-scheduler 1 b046056ff071b kube-scheduler-functional-695625 kube-system
I1229 06:55:00.671347 17440 command_runner.go:130] > bd96b57aa9fce 6e38f40d628db 2 minutes ago Exited storage-provisioner 0 64853b50a6c5e storage-provisioner kube-system
I1229 06:55:00.671362 17440 command_runner.go:130] > 8911777281f41 32652ff1bbe6b 2 minutes ago Exited kube-proxy 0 548561c7ada8f kube-proxy-g7lp9 kube-system
I1229 06:55:00.673604 17440 logs.go:123] Gathering logs for kubelet ...
I1229 06:55:00.673628 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1229 06:55:00.695836 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.077121 2634 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
I1229 06:55:00.695863 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.077418 2634 reconciler.go:29] "Reconciler: start to sync state"
I1229 06:55:00.695877 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.077955 2634 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory
I1229 06:55:00.695887 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.109084 2634 factory.go:223] Registration of the containerd container factory successfully
I1229 06:55:00.695901 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.135073 2634 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4"
I1229 06:55:00.695910 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.137245 2634 status_manager.go:249] "Starting to sync pod status with apiserver"
I1229 06:55:00.695920 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.137294 2634 kubelet.go:2501] "Starting kubelet main sync loop"
I1229 06:55:00.695934 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.137340 2634 kubelet.go:2525] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
I1229 06:55:00.695942 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.209773 2634 cpu_manager.go:225] "Starting" policy="none"
I1229 06:55:00.695952 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.209976 2634 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
I1229 06:55:00.695962 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210050 2634 state_mem.go:41] "Initialized" logger="CPUManager state checkpoint.CPUManager state memory"
I1229 06:55:00.695975 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210361 2634 state_mem.go:94] "Updated default CPUSet" logger="CPUManager state checkpoint.CPUManager state memory" cpuSet=""
I1229 06:55:00.696001 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210374 2634 state_mem.go:102] "Updated CPUSet assignments" logger="CPUManager state checkpoint.CPUManager state memory" assignments={}
I1229 06:55:00.696011 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210392 2634 policy_none.go:50] "Start"
I1229 06:55:00.696020 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210408 2634 memory_manager.go:187] "Starting memorymanager" policy="None"
I1229 06:55:00.696029 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210421 2634 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint"
I1229 06:55:00.696038 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210527 2634 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint"
I1229 06:55:00.696045 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210534 2634 policy_none.go:44] "Start"
I1229 06:55:00.696056 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.219245 2634 manager.go:525] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
I1229 06:55:00.696067 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.220437 2634 eviction_manager.go:194] "Eviction manager: starting control loop"
I1229 06:55:00.696078 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.220456 2634 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
I1229 06:55:00.696089 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.221071 2634 plugin_manager.go:121] "Starting Kubelet Plugin Manager"
I1229 06:55:00.696114 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.226221 2634 eviction_manager.go:272] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime"
I1229 06:55:00.696126 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.239387 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696144 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.239974 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.696155 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.240381 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696165 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.262510 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696185 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283041 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-kubeconfig\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696208 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283087 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-usr-share-ca-certificates\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696228 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283118 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/ebc0dd45a3bf1e20d1e524935fd6129c-kubeconfig\") pod \"kube-scheduler-functional-695625\" (UID: \"ebc0dd45a3bf1e20d1e524935fd6129c\") " pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.696247 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283135 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-certs\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696268 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283151 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-k8s-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696288 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283163 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-ca-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696309 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283175 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-flexvolume-dir\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696329 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283189 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-k8s-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696357 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283209 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-data\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696378 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283223 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-ca-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696400 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283249 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-usr-share-ca-certificates\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696416 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.285713 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-controller-manager-functional-695625\" already exists" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696428 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.290012 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-scheduler-functional-695625\" already exists" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.696442 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.290269 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"etcd-functional-695625\" already exists" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696454 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.304300 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-apiserver-functional-695625\" already exists" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696466 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.336817 2634 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:55:00.696475 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.351321 2634 kubelet_node_status.go:123] "Node was previously registered" node="functional-695625"
I1229 06:55:00.696486 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.351415 2634 kubelet_node_status.go:77] "Successfully registered node" node="functional-695625"
I1229 06:55:00.696493 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.033797 2634 apiserver.go:52] "Watching apiserver"
I1229 06:55:00.696503 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.077546 2634 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
I1229 06:55:00.696527 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.181689 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/etcd-functional-695625" podStartSLOduration=3.181660018 podStartE2EDuration="3.181660018s" podCreationTimestamp="2025-12-29 06:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:42.180947341 +0000 UTC m=+1.223544146" watchObservedRunningTime="2025-12-29 06:52:42.181660018 +0000 UTC m=+1.224256834"
I1229 06:55:00.696555 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.221952 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-controller-manager-functional-695625" podStartSLOduration=3.221936027 podStartE2EDuration="3.221936027s" podCreationTimestamp="2025-12-29 06:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:42.202120755 +0000 UTC m=+1.244717560" watchObservedRunningTime="2025-12-29 06:52:42.221936027 +0000 UTC m=+1.264532905"
I1229 06:55:00.696583 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.238774 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-apiserver-functional-695625" podStartSLOduration=3.238759924 podStartE2EDuration="3.238759924s" podCreationTimestamp="2025-12-29 06:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:42.238698819 +0000 UTC m=+1.281295638" watchObservedRunningTime="2025-12-29 06:52:42.238759924 +0000 UTC m=+1.281356744"
I1229 06:55:00.696609 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.238905 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-scheduler-functional-695625" podStartSLOduration=3.238868136 podStartE2EDuration="3.238868136s" podCreationTimestamp="2025-12-29 06:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:42.224445467 +0000 UTC m=+1.267042290" watchObservedRunningTime="2025-12-29 06:52:42.238868136 +0000 UTC m=+1.281464962"
I1229 06:55:00.696622 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.266475 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696634 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.266615 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696651 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.266971 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696664 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.267487 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.696678 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.287234 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-controller-manager-functional-695625\" already exists" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696690 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.287316 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.696704 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.292837 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-apiserver-functional-695625\" already exists" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696718 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.293863 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.696730 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.293764 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-scheduler-functional-695625\" already exists" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.696745 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.294163 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.696757 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.298557 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"etcd-functional-695625\" already exists" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696770 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.298633 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.696782 17440 command_runner.go:130] > Dec 29 06:52:43 functional-695625 kubelet[2634]: E1229 06:52:43.272537 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.696807 17440 command_runner.go:130] > Dec 29 06:52:43 functional-695625 kubelet[2634]: E1229 06:52:43.273148 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.696835 17440 command_runner.go:130] > Dec 29 06:52:43 functional-695625 kubelet[2634]: E1229 06:52:43.273501 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.696850 17440 command_runner.go:130] > Dec 29 06:52:43 functional-695625 kubelet[2634]: E1229 06:52:43.273627 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.696863 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 kubelet[2634]: E1229 06:52:44.279056 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.696877 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 kubelet[2634]: E1229 06:52:44.279353 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.696887 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 kubelet[2634]: I1229 06:52:44.754123 2634 kuberuntime_manager.go:2062] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
I1229 06:55:00.696899 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 kubelet[2634]: I1229 06:52:44.756083 2634 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
I1229 06:55:00.696917 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: I1229 06:52:45.407560 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94mg5\" (UniqueName: \"kubernetes.io/projected/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-kube-api-access-94mg5\") pod \"kube-proxy-g7lp9\" (UID: \"9c2c2ac1-7fa0-427d-b78e-ee14e169895a\") " pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.696938 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: I1229 06:52:45.408503 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-kube-proxy\") pod \"kube-proxy-g7lp9\" (UID: \"9c2c2ac1-7fa0-427d-b78e-ee14e169895a\") " pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.696958 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: I1229 06:52:45.408957 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-xtables-lock\") pod \"kube-proxy-g7lp9\" (UID: \"9c2c2ac1-7fa0-427d-b78e-ee14e169895a\") " pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.696976 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: I1229 06:52:45.409131 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-lib-modules\") pod \"kube-proxy-g7lp9\" (UID: \"9c2c2ac1-7fa0-427d-b78e-ee14e169895a\") " pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.696991 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: E1229 06:52:45.528153 2634 projected.go:291] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
I1229 06:55:00.697004 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: E1229 06:52:45.528186 2634 projected.go:196] Error preparing data for projected volume kube-api-access-94mg5 for pod kube-system/kube-proxy-g7lp9: configmap "kube-root-ca.crt" not found
I1229 06:55:00.697032 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: E1229 06:52:45.528293 2634 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-kube-api-access-94mg5 podName:9c2c2ac1-7fa0-427d-b78e-ee14e169895a nodeName:}" failed. No retries permitted until 2025-12-29 06:52:46.028266861 +0000 UTC m=+5.070863673 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-94mg5" (UniqueName: "kubernetes.io/projected/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-kube-api-access-94mg5") pod "kube-proxy-g7lp9" (UID: "9c2c2ac1-7fa0-427d-b78e-ee14e169895a") : configmap "kube-root-ca.crt" not found
I1229 06:55:00.697044 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.406131 2634 kubelet_node_status.go:427] "Fast updating node status as it just became ready"
I1229 06:55:00.697064 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519501 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b64sn\" (UniqueName: \"kubernetes.io/projected/00a95e37-1394-45a7-a376-b195e31e3e9c-kube-api-access-b64sn\") pod \"coredns-7d764666f9-wfq7m\" (UID: \"00a95e37-1394-45a7-a376-b195e31e3e9c\") " pod="kube-system/coredns-7d764666f9-wfq7m"
I1229 06:55:00.697084 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519550 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/00a95e37-1394-45a7-a376-b195e31e3e9c-config-volume\") pod \"coredns-7d764666f9-wfq7m\" (UID: \"00a95e37-1394-45a7-a376-b195e31e3e9c\") " pod="kube-system/coredns-7d764666f9-wfq7m"
I1229 06:55:00.697104 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519571 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") pod \"coredns-7d764666f9-9mrnn\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") " pod="kube-system/coredns-7d764666f9-9mrnn"
I1229 06:55:00.697124 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519587 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") pod \"coredns-7d764666f9-9mrnn\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") " pod="kube-system/coredns-7d764666f9-9mrnn"
I1229 06:55:00.697138 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.411642 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605"
I1229 06:55:00.697151 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.545186 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:55:00.697170 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.731196 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/b5f201ca-6d54-4e15-9584-396fb1486f3c-tmp\") pod \"storage-provisioner\" (UID: \"b5f201ca-6d54-4e15-9584-396fb1486f3c\") " pod="kube-system/storage-provisioner"
I1229 06:55:00.697192 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.731252 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghc5d\" (UniqueName: \"kubernetes.io/projected/b5f201ca-6d54-4e15-9584-396fb1486f3c-kube-api-access-ghc5d\") pod \"storage-provisioner\" (UID: \"b5f201ca-6d54-4e15-9584-396fb1486f3c\") " pod="kube-system/storage-provisioner"
I1229 06:55:00.697206 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: E1229 06:52:48.628275 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.697229 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.634714 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/coredns-7d764666f9-9mrnn" podStartSLOduration=2.634698273 podStartE2EDuration="2.634698273s" podCreationTimestamp="2025-12-29 06:52:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.631484207 +0000 UTC m=+7.674081027" watchObservedRunningTime="2025-12-29 06:52:48.634698273 +0000 UTC m=+7.677295093"
I1229 06:55:00.697245 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: E1229 06:52:48.649761 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.697268 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.694857 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/coredns-7d764666f9-wfq7m" podStartSLOduration=2.694842541 podStartE2EDuration="2.694842541s" podCreationTimestamp="2025-12-29 06:52:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.672691157 +0000 UTC m=+7.715287974" watchObservedRunningTime="2025-12-29 06:52:48.694842541 +0000 UTC m=+7.737439360"
I1229 06:55:00.697296 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.728097 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.728082592 podStartE2EDuration="1.728082592s" podCreationTimestamp="2025-12-29 06:52:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.696376688 +0000 UTC m=+7.738973499" watchObservedRunningTime="2025-12-29 06:52:48.728082592 +0000 UTC m=+7.770679413"
I1229 06:55:00.697310 17440 command_runner.go:130] > Dec 29 06:52:49 functional-695625 kubelet[2634]: E1229 06:52:49.674249 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.697322 17440 command_runner.go:130] > Dec 29 06:52:50 functional-695625 kubelet[2634]: E1229 06:52:50.680852 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.697336 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.223368 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.697361 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: I1229 06:52:52.243928 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-proxy-g7lp9" podStartSLOduration=7.243911092 podStartE2EDuration="7.243911092s" podCreationTimestamp="2025-12-29 06:52:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.744380777 +0000 UTC m=+7.786977597" watchObservedRunningTime="2025-12-29 06:52:52.243911092 +0000 UTC m=+11.286507895"
I1229 06:55:00.697376 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.396096 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.697388 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.693687 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.697402 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: E1229 06:52:53.390926 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.697420 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979173 2634 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") pod \"c4313c5f-3b86-48de-8f3c-02d7e007542a\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") "
I1229 06:55:00.697442 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979225 2634 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") pod \"c4313c5f-3b86-48de-8f3c-02d7e007542a\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") "
I1229 06:55:00.697463 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979732 2634 operation_generator.go:779] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume" pod "c4313c5f-3b86-48de-8f3c-02d7e007542a" (UID: "c4313c5f-3b86-48de-8f3c-02d7e007542a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
I1229 06:55:00.697483 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.981248 2634 operation_generator.go:779] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj" pod "c4313c5f-3b86-48de-8f3c-02d7e007542a" (UID: "c4313c5f-3b86-48de-8f3c-02d7e007542a"). InnerVolumeSpecName "kube-api-access-lc5xj". PluginName "kubernetes.io/projected", VolumeGIDValue ""
I1229 06:55:00.697499 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.079447 2634 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") on node \"functional-695625\" DevicePath \"\""
I1229 06:55:00.697515 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.079521 2634 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") on node \"functional-695625\" DevicePath \"\""
I1229 06:55:00.697526 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.715729 2634 scope.go:122] "RemoveContainer" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:55:00.697536 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.756456 2634 scope.go:122] "RemoveContainer" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:55:00.697554 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: E1229 06:52:54.758451 2634 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:55:00.697576 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.758508 2634 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"} err="failed to get container status \"67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f\": rpc error: code = Unknown desc = Error response from daemon: No such container: 67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:55:00.697591 17440 command_runner.go:130] > Dec 29 06:52:55 functional-695625 kubelet[2634]: I1229 06:52:55.144582 2634 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="c4313c5f-3b86-48de-8f3c-02d7e007542a" path="/var/lib/kubelet/pods/c4313c5f-3b86-48de-8f3c-02d7e007542a/volumes"
I1229 06:55:00.697608 17440 command_runner.go:130] > Dec 29 06:52:58 functional-695625 kubelet[2634]: E1229 06:52:58.655985 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.697621 17440 command_runner.go:130] > Dec 29 06:53:20 functional-695625 kubelet[2634]: E1229 06:53:20.683378 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.697637 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913108 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.697651 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913180 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697669 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913193 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697710 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915141 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.697726 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915181 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697746 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915192 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697762 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139490 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.697775 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139600 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697790 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139623 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697815 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139634 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697830 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917175 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.697846 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917271 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697860 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917284 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697876 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918722 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.697892 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918780 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697906 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918792 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697923 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139097 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.697937 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139170 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697951 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139187 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697966 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139214 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697986 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921730 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698002 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921808 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698029 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921823 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698046 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.923664 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698060 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.924161 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698081 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.924185 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698097 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139396 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.698113 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139458 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698126 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139472 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698141 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139485 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698155 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239338 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698172 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239383 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698187 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244411 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:55:00.698202 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244504 2634 container_log_manager.go:154] "Failed to rotate container logs" err="failed to list containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698218 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458139 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:55:00.698235 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458218 2634 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to list pod stats: failed to get pod or container map: failed to list all containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698274 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926377 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698293 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926435 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698309 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926447 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698325 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.994121 2634 log.go:32] "Version from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698341 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927827 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698362 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927867 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698378 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927930 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698395 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140553 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.698408 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140635 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698424 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140653 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698439 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140664 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698455 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930020 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698469 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930083 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698484 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930129 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698501 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932311 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698514 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932363 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698527 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932375 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698541 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140618 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.698554 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140679 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698577 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140697 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698590 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140709 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698606 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933321 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698620 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933382 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698634 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933393 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698650 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241324 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698666 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241391 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698682 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935649 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698696 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935930 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698711 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935948 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698727 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140389 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.698743 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140507 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698756 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140525 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698769 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140536 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698784 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937258 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698808 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937350 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698823 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937364 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698840 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939069 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698853 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939129 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698868 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939141 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698886 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139354 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.698903 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139413 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698916 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139428 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698933 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139440 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698948 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941237 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698962 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941285 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698976 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941296 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698993 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.109014 2634 kubelet.go:2691] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="6.97s"
I1229 06:55:00.699007 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.125762 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:55:00.699018 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.129855 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.699031 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.131487 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.699042 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.140438 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:55:00.699055 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.141703 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.699067 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.164789 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:55:00.699078 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.190793 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:55:00.699093 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202029 2634 kuberuntime_manager.go:1961] "PodSandboxStatus of sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.699105 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202077 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.699119 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202095 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.699130 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202348 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:55:00.699145 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202382 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.699157 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202394 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.699180 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202436 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-proxy-g7lp9" podUID="9c2c2ac1-7fa0-427d-b78e-ee14e169895a"
I1229 06:55:00.699195 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202695 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:55:00.699207 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202717 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"}
I1229 06:55:00.699224 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202737 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:55:00.699243 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202753 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/storage-provisioner" podUID="b5f201ca-6d54-4e15-9584-396fb1486f3c"
I1229 06:55:00.699256 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202781 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:55:00.699269 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202797 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.699284 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202829 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.699310 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203153 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:55:00.699330 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203201 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:55:00.699343 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203215 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"}
I1229 06:55:00.699362 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203229 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:55:00.699380 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203240 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:55:00.699407 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.205108 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer"
I1229 06:55:00.699439 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205291 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"ebc0dd45a3bf1e20d1e524935fd6129c\"}"
I1229 06:55:00.699460 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205358 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="ebc0dd45a3bf1e20d1e524935fd6129c"
I1229 06:55:00.699477 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205374 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.699497 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205391 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.699515 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205442 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"5079d003096e0cf8214852718da6832c\"}"
I1229 06:55:00.699533 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205472 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:55:00.699619 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205487 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.699640 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205502 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.699660 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.306369 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:55:00.699683 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.465709 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:55:00.699709 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.727775 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:55:00.699722 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.224724 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:55:00.699738 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.225054 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.699750 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.239349 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:55:00.699763 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.239613 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.699774 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.260924 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:55:00.699785 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.262706 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.699807 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.271403 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:55:00.699820 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.272071 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.699834 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.486082 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.699846 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.527267 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.699861 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.585714 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.699872 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.682419 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.699886 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 kubelet[2634]: E1229 06:53:51.994421 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.699931 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.009282 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.699946 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.028514 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.699956 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: I1229 06:53:52.059063 2634 scope.go:122] "RemoveContainer" containerID="4b032678478a0db80f17dd1d989d5d3ad03f5c19d261d887ee8bbc80c0ef716c"
I1229 06:55:00.699972 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.061268 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.700008 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.136206 2634 controller.go:251] "Failed to update lease" err="Put \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:55:00.700031 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.348866 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:55:00.700053 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.420977 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:55:00.700067 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.083455 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:55:00.700078 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099631 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:55:00.700091 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099665 2634 scope.go:122] "RemoveContainer" containerID="14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697"
I1229 06:55:00.700102 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.099823 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.700116 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.114949 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:55:00.700129 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115125 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.700139 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.115147 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:55:00.700159 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115570 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:55:00.700168 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128002 2634 scope.go:122] "RemoveContainer" containerID="abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32"
I1229 06:55:00.700179 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128620 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:55:00.700190 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.128846 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.700199 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128862 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:55:00.700217 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.129184 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:55:00.700228 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.146245 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.700240 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.149274 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:55:00.700250 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.158968 2634 scope.go:122] "RemoveContainer" containerID="bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159"
I1229 06:55:00.700268 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.483523 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:55:00.700281 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.165031 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.700291 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.165425 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:55:00.700310 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.166088 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:55:00.700321 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.177787 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.700331 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.177811 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:55:00.700349 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.178010 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:55:00.700364 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190233 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.700375 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.190259 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:55:00.700394 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190388 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:55:00.700405 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190596 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.700415 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.197650 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.700427 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198541 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.700454 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: I1229 06:53:55.198579 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:55:00.700474 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198854 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:55:00.700515 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.114313 2634 kubelet_node_status.go:474] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"containerRuntimeVersion\\\":\\\"docker://28.5.2\\\"}}}\" for node \"functional-695625\": Patch \"https://192.168.39.121:8441/api/v1/nodes/functional-695625/status?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:55:00.700529 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.650698 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.700539 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.650771 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:55:00.700558 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.651066 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:55:00.700570 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.808551 2634 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:55:00.700578 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
I1229 06:55:00.700584 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
I1229 06:55:00.700590 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
I1229 06:55:00.700597 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Consumed 2.468s CPU time, 33.6M memory peak.
I1229 06:55:00.700603 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 systemd[1]: Started kubelet: The Kubernetes Node Agent.
I1229 06:55:00.700612 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045324 6517 server.go:525] "Kubelet version" kubeletVersion="v1.35.0"
I1229 06:55:00.700620 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045661 6517 server.go:527] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:55:00.700631 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045707 6517 watchdog_linux.go:95] "Systemd watchdog is not enabled"
I1229 06:55:00.700641 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045732 6517 watchdog_linux.go:138] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started."
I1229 06:55:00.700652 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.046147 6517 server.go:951] "Client rotation is on, will bootstrap in background"
I1229 06:55:00.700662 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.047668 6517 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem"
I1229 06:55:00.700674 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.050807 6517 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:55:00.700684 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.062385 6517 server.go:1418] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
I1229 06:55:00.700696 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066238 6517 server.go:775] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /"
I1229 06:55:00.700707 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066279 6517 server.go:836] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false
I1229 06:55:00.700717 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066453 6517 container_manager_linux.go:272] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
I1229 06:55:00.700758 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066538 6517 container_manager_linux.go:277] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"functional-695625","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
I1229 06:55:00.700770 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066683 6517 topology_manager.go:143] "Creating topology manager with none policy"
I1229 06:55:00.700779 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066691 6517 container_manager_linux.go:308] "Creating device plugin manager"
I1229 06:55:00.700790 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066715 6517 container_manager_linux.go:317] "Creating Dynamic Resource Allocation (DRA) manager"
I1229 06:55:00.700816 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066977 6517 state_mem.go:41] "Initialized" logger="CPUManager state memory"
I1229 06:55:00.700831 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067273 6517 kubelet.go:482] "Attempting to sync node with API server"
I1229 06:55:00.700846 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067303 6517 kubelet.go:383] "Adding static pod path" path="/etc/kubernetes/manifests"
I1229 06:55:00.700858 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067321 6517 kubelet.go:394] "Adding apiserver pod source"
I1229 06:55:00.700866 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067339 6517 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
I1229 06:55:00.700879 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.071645 6517 kuberuntime_manager.go:294] "Container runtime initialized" containerRuntime="docker" version="28.5.2" apiVersion="v1"
I1229 06:55:00.700891 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072532 6517 kubelet.go:943] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled"
I1229 06:55:00.700905 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072614 6517 kubelet.go:970] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled"
I1229 06:55:00.700912 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.079617 6517 server.go:1257] "Started kubelet"
I1229 06:55:00.700921 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.087576 6517 fs_resource_analyzer.go:69] "Starting FS ResourceAnalyzer"
I1229 06:55:00.700932 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.098777 6517 scope.go:122] "RemoveContainer" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:55:00.700943 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.106373 6517 server.go:182] "Starting to listen" address="0.0.0.0" port=10250
I1229 06:55:00.700951 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.109848 6517 server.go:317] "Adding debug handlers to kubelet server"
I1229 06:55:00.700963 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117444 6517 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
I1229 06:55:00.700971 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117555 6517 server_v1.go:49] "podresources" method="list" useActivePods=true
I1229 06:55:00.700986 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117716 6517 server.go:254] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
I1229 06:55:00.701000 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.118699 6517 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key"
I1229 06:55:00.701008 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119167 6517 volume_manager.go:311] "Starting Kubelet Volume Manager"
I1229 06:55:00.701020 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.119433 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:55:00.701029 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119972 6517 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
I1229 06:55:00.701037 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.120370 6517 reconciler.go:29] "Reconciler: start to sync state"
I1229 06:55:00.701046 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.130418 6517 factory.go:223] Registration of the systemd container factory successfully
I1229 06:55:00.701061 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.131188 6517 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory
I1229 06:55:00.701073 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.141029 6517 factory.go:223] Registration of the containerd container factory successfully
I1229 06:55:00.701082 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183036 6517 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4"
I1229 06:55:00.701093 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183220 6517 status_manager.go:249] "Starting to sync pod status with apiserver"
I1229 06:55:00.701100 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183330 6517 kubelet.go:2501] "Starting kubelet main sync loop"
I1229 06:55:00.701114 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.183444 6517 kubelet.go:2525] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
I1229 06:55:00.701124 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.221428 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:55:00.701143 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.242700 6517 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:55:00.701160 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.243294 6517 image_gc_manager.go:222] "Failed to monitor images" err="get container status: runtime container status: rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:55:00.701170 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269160 6517 cpu_manager.go:225] "Starting" policy="none"
I1229 06:55:00.701178 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269189 6517 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
I1229 06:55:00.701188 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269211 6517 state_mem.go:41] "Initialized" logger="CPUManager state checkpoint.CPUManager state memory"
I1229 06:55:00.701201 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269368 6517 state_mem.go:94] "Updated default CPUSet" logger="CPUManager state checkpoint.CPUManager state memory" cpuSet=""
I1229 06:55:00.701210 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269407 6517 state_mem.go:102] "Updated CPUSet assignments" logger="CPUManager state checkpoint.CPUManager state memory" assignments={}
I1229 06:55:00.701218 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269429 6517 policy_none.go:50] "Start"
I1229 06:55:00.701226 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269440 6517 memory_manager.go:187] "Starting memorymanager" policy="None"
I1229 06:55:00.701237 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269450 6517 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint"
I1229 06:55:00.701246 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269563 6517 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint"
I1229 06:55:00.701256 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.270193 6517 policy_none.go:44] "Start"
I1229 06:55:00.701266 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.280697 6517 manager.go:525] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
I1229 06:55:00.701277 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282209 6517 eviction_manager.go:194] "Eviction manager: starting control loop"
I1229 06:55:00.701287 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282399 6517 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
I1229 06:55:00.701297 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282694 6517 plugin_manager.go:121] "Starting Kubelet Plugin Manager"
I1229 06:55:00.701308 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.285700 6517 eviction_manager.go:272] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime"
I1229 06:55:00.701322 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.286000 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701334 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.290189 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:55:00.701348 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.296210 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:55:00.701361 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296213 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:55:00.701372 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296423 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:55:00.701385 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296509 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:55:00.701399 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296522 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:55:00.701410 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296659 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:55:00.701422 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296736 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:55:00.701433 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.298291 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:55:00.701447 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.300783 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:55:00.701458 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.307864 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:55:00.701471 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327004 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:55:00.701483 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327039 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:55:00.701496 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.337430 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:55:00.701508 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338584 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:55:00.701521 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338603 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:55:00.701533 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.339318 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:55:00.701550 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.384315 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:55:00.701567 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.121079 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="200ms"
I1229 06:55:00.701581 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.286789 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701592 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.288099 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701611 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.322920 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="400ms"
I1229 06:55:00.701625 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.289381 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701642 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.724518 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="800ms"
I1229 06:55:00.701678 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.088119 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bc22bb49a default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,LastTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:55:00.701695 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.387607 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:55:00.701705 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: I1229 06:54:34.589687 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:55:00.701716 17440 command_runner.go:130] > Dec 29 06:54:40 functional-695625 kubelet[6517]: E1229 06:54:40.289653 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701735 17440 command_runner.go:130] > Dec 29 06:54:41 functional-695625 kubelet[6517]: E1229 06:54:41.525961 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="1.6s"
I1229 06:55:00.701749 17440 command_runner.go:130] > Dec 29 06:54:50 functional-695625 kubelet[6517]: E1229 06:54:50.290623 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701764 17440 command_runner.go:130] > Dec 29 06:54:53 functional-695625 kubelet[6517]: E1229 06:54:53.127043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="3.2s"
I1229 06:55:00.701780 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.123055 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:55:00.701807 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223407 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-ca-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.701827 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223452 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-usr-share-ca-certificates\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.701847 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224254 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-flexvolume-dir\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.701867 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224286 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-k8s-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.701886 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224307 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/ebc0dd45a3bf1e20d1e524935fd6129c-kubeconfig\") pod \"kube-scheduler-functional-695625\" (UID: \"ebc0dd45a3bf1e20d1e524935fd6129c\") " pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.701907 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224328 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-k8s-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.701928 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224346 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-ca-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.701948 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224360 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-kubeconfig\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.701971 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224377 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-usr-share-ca-certificates\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.701995 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224432 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-certs\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:55:00.702014 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224449 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-data\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:55:00.702027 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.291332 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.755255 17440 logs.go:123] Gathering logs for dmesg ...
I1229 06:55:00.755293 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1229 06:55:00.771031 17440 command_runner.go:130] > [Dec29 06:52] Booted with the nomodeset parameter. Only the system framebuffer will be available
I1229 06:55:00.771066 17440 command_runner.go:130] > [ +0.000007] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
I1229 06:55:00.771079 17440 command_runner.go:130] > [ +0.000088] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
I1229 06:55:00.771088 17440 command_runner.go:130] > [ +0.005634] (rpcbind)[119]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
I1229 06:55:00.771097 17440 command_runner.go:130] > [ +0.202886] crun[405]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set
I1229 06:55:00.771103 17440 command_runner.go:130] > [ +0.971059] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
I1229 06:55:00.771109 17440 command_runner.go:130] > [ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
I1229 06:55:00.771116 17440 command_runner.go:130] > [ +0.000001] NFSD: Unable to initialize client recovery tracking! (-2)
I1229 06:55:00.771121 17440 command_runner.go:130] > [ +0.268875] kauditd_printk_skb: 32 callbacks suppressed
I1229 06:55:00.771126 17440 command_runner.go:130] > [ +0.123569] kauditd_printk_skb: 342 callbacks suppressed
I1229 06:55:00.771131 17440 command_runner.go:130] > [ +0.099711] kauditd_printk_skb: 205 callbacks suppressed
I1229 06:55:00.771136 17440 command_runner.go:130] > [ +0.170782] kauditd_printk_skb: 221 callbacks suppressed
I1229 06:55:00.771143 17440 command_runner.go:130] > [ +0.199839] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:55:00.771153 17440 command_runner.go:130] > [ +0.025660] kauditd_printk_skb: 318 callbacks suppressed
I1229 06:55:00.771158 17440 command_runner.go:130] > [Dec29 06:53] kauditd_printk_skb: 19 callbacks suppressed
I1229 06:55:00.771165 17440 command_runner.go:130] > [ +15.204939] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:55:00.771175 17440 command_runner.go:130] > [ +0.333829] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:55:00.771185 17440 command_runner.go:130] > [ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:55:00.771191 17440 command_runner.go:130] > [ +4.976278] kauditd_printk_skb: 42 callbacks suppressed
I1229 06:55:00.771196 17440 command_runner.go:130] > [ +0.830497] kauditd_printk_skb: 396 callbacks suppressed
I1229 06:55:00.771202 17440 command_runner.go:130] > [ +5.294312] kauditd_printk_skb: 231 callbacks suppressed
I1229 06:55:00.772218 17440 logs.go:123] Gathering logs for describe nodes ...
I1229 06:55:00.772246 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1229 06:56:00.863293 17440 command_runner.go:130] ! Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
I1229 06:56:00.863340 17440 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (1m0.091082059s)
W1229 06:56:00.863385 17440 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
output:
** stderr **
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
** /stderr **
I1229 06:56:00.863402 17440 logs.go:123] Gathering logs for kube-apiserver [fb6db97d8ffe] ...
I1229 06:56:00.863420 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 fb6db97d8ffe"
I1229 06:56:00.897112 17440 command_runner.go:130] ! I1229 06:53:50.588377 1 options.go:263] external host was not specified, using 192.168.39.121
I1229 06:56:00.897142 17440 command_runner.go:130] ! I1229 06:53:50.597275 1 server.go:150] Version: v1.35.0
I1229 06:56:00.897153 17440 command_runner.go:130] ! I1229 06:53:50.597323 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:00.897164 17440 command_runner.go:130] ! E1229 06:53:50.606724 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
W1229 06:56:00.898716 17440 logs.go:138] Found kube-apiserver [fb6db97d8ffe] problem: E1229 06:53:50.606724 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
I1229 06:56:00.898738 17440 logs.go:123] Gathering logs for etcd [d81259f64136] ...
I1229 06:56:00.898750 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 d81259f64136"
I1229 06:56:00.935530 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517725Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:00.938590 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517828Z","caller":"etcdmain/config.go:270","msg":"--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7."}
I1229 06:56:00.938653 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.517848Z","caller":"etcdmain/etcd.go:64","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.39.121:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--feature-gates=InitialCorruptCheck=true","--initial-advertise-peer-urls=https://192.168.39.121:2380","--initial-cluster=functional-695625=https://192.168.39.121:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.39.121:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.39.121:2380","--name=functional-695625","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--snapshot-count=10000","--trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--watch-progress-notify-interval=5s"]}
I1229 06:56:00.938666 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519323Z","caller":"etcdmain/etcd.go:107","msg":"server has already been initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
I1229 06:56:00.938679 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.519372Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:00.938689 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519700Z","caller":"embed/etcd.go:138","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.39.121:2380"]}
I1229 06:56:00.938712 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.522332Z","caller":"embed/etcd.go:544","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:00.938728 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.530852Z","caller":"embed/etcd.go:146","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"]}
I1229 06:56:00.938838 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.531312Z","caller":"embed/etcd.go:323","msg":"starting an etcd server","etcd-version":"3.6.6","git-sha":"d2809cf","go-version":"go1.24.10","go-os":"linux","go-arch":"amd64","max-cpu-set":2,"max-cpu-available":2,"member-initialized":true,"name":"functional-695625","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"experimental-local-address":"","cors":["*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"feature-gates":"InitialCorruptCheck=true","initial-corrupt-check":false,"corrupt-check-time-interval":"0s","compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","discovery-token":"","discovery-endpoints":"","discovery-dial-timeout":"2s","discovery-request-timeout":"5s","discovery-keepalive-time":"2s","discovery-keepalive-timeout":"6s","discovery-insecure-transport":true,"discovery-insecure-skip-tls-verify":false,"discovery-cert":"","discovery-key":"","discovery-cacert":"","discovery-user":"","downgrade-check-interval":"5s","max-learners":1,"v2-deprecation":"write-only"}
I1229 06:56:00.938875 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.533505Z","logger":"bbolt","caller":"backend/backend.go:203","msg":"Opening db file (/var/lib/minikube/etcd/member/snap/db) with mode -rw------- and with options: {Timeout: 0s, NoGrowSync: false, NoFreelistSync: true, PreLoadFreelist: false, FreelistType: hashmap, ReadOnly: false, MmapFlags: 8000, InitialMmapSize: 10737418240, PageSize: 0, NoSync: false, OpenFile: 0x0, Mlock: false, Logger: 0xc00006a930}"}
I1229 06:56:00.938892 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.562961Z","logger":"bbolt","caller":"bbolt@v1.4.3/db.go:321","msg":"Opening bbolt db (/var/lib/minikube/etcd/member/snap/db) successfully"}
I1229 06:56:00.938902 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.566967Z","caller":"storage/backend.go:80","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"33.344174ms"}
I1229 06:56:00.938913 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.569353Z","caller":"etcdserver/bootstrap.go:220","msg":"restore consistentIndex","index":497}
I1229 06:56:00.938922 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596637Z","caller":"etcdserver/bootstrap.go:441","msg":"No snapshot found. Recovering WAL from scratch!"}
I1229 06:56:00.938935 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596694Z","caller":"etcdserver/bootstrap.go:232","msg":"recovered v3 backend","backend-size-bytes":1019904,"backend-size":"1.0 MB","backend-size-in-use-bytes":999424,"backend-size-in-use":"999 kB"}
I1229 06:56:00.938946 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596795Z","caller":"etcdserver/bootstrap.go:90","msg":"Bootstrapping WAL from snapshot"}
I1229 06:56:00.938958 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.620855Z","caller":"etcdserver/bootstrap.go:599","msg":"restarting local member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","commit-index":497}
I1229 06:56:00.938969 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.621587Z","caller":"etcdserver/bootstrap.go:94","msg":"bootstrapping cluster"}
I1229 06:56:00.938978 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624518Z","caller":"etcdserver/bootstrap.go:101","msg":"bootstrapping storage"}
I1229 06:56:00.938993 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624664Z","caller":"membership/cluster.go:605","msg":"Detected member only in v3store but missing in v2store","member":"{ID:cbdf275f553df7c2 RaftAttributes:{PeerURLs:[https://192.168.39.121:2380] IsLearner:false} Attributes:{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}}"}
I1229 06:56:00.939003 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624700Z","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.6"}
I1229 06:56:00.939022 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624712Z","caller":"membership/cluster.go:297","msg":"recovered/added member from store","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","recovered-remote-peer-id":"cbdf275f553df7c2","recovered-remote-peer-urls":["https://192.168.39.121:2380"],"recovered-remote-peer-is-learner":false}
I1229 06:56:00.939035 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624720Z","caller":"membership/cluster.go:307","msg":"set cluster version from store","cluster-version":"3.6"}
I1229 06:56:00.939046 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624728Z","caller":"etcdserver/bootstrap.go:109","msg":"bootstrapping raft"}
I1229 06:56:00.939053 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624764Z","caller":"etcdserver/server.go:312","msg":"bootstrap successfully"}
I1229 06:56:00.939062 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624799Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=()"}
I1229 06:56:00.939071 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624856Z","logger":"raft","caller":"v3@v3.6.0/raft.go:897","msg":"cbdf275f553df7c2 became follower at term 2"}
I1229 06:56:00.939081 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624867Z","logger":"raft","caller":"v3@v3.6.0/raft.go:493","msg":"newRaft cbdf275f553df7c2 [peers: [], term: 2, commit: 497, applied: 0, lastindex: 497, lastterm: 2]"}
I1229 06:56:00.939091 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.634002Z","caller":"auth/store.go:1135","msg":"simple token is not cryptographically signed"}
I1229 06:56:00.939111 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.644772Z","caller":"mvcc/kvstore.go:408","msg":"kvstore restored","current-rev":465}
I1229 06:56:00.939126 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.681530Z","caller":"storage/quota.go:93","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
I1229 06:56:00.939142 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686046Z","caller":"etcdserver/corrupt.go:91","msg":"starting initial corruption check","local-member-id":"cbdf275f553df7c2","timeout":"7s"}
I1229 06:56:00.939162 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686350Z","caller":"etcdserver/corrupt.go:172","msg":"initial corruption checking passed; no corruption","local-member-id":"cbdf275f553df7c2"}
I1229 06:56:00.939181 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686391Z","caller":"etcdserver/server.go:589","msg":"starting etcd server","local-member-id":"cbdf275f553df7c2","local-server-version":"3.6.6","cluster-id":"6f38b6947d3f1f22","cluster-version":"3.6"}
I1229 06:56:00.939213 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687141Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:00.939249 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687399Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cbdf275f553df7c2","initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
I1229 06:56:00.939258 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687425Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
I1229 06:56:00.939274 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687475Z","caller":"etcdserver/server.go:483","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"cbdf275f553df7c2","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
I1229 06:56:00.939289 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687536Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
I1229 06:56:00.939302 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687564Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
I1229 06:56:00.939324 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687571Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
I1229 06:56:00.939342 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687702Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:00.939352 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687713Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.121:2380"}
I1229 06:56:00.939362 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692750Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=(14690503799911348162)"}
I1229 06:56:00.939377 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692847Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","added-peer-id":"cbdf275f553df7c2","added-peer-peer-urls":["https://192.168.39.121:2380"],"added-peer-is-learner":false}
I1229 06:56:00.939389 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.694703Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","from":"3.6","to":"3.6"}
I1229 06:56:00.939404 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830725Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cbdf275f553df7c2 is starting a new election at term 2"}
I1229 06:56:00.939423 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830765Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cbdf275f553df7c2 became pre-candidate at term 2"}
I1229 06:56:00.939439 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830844Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgPreVoteResp from cbdf275f553df7c2 at term 2"}
I1229 06:56:00.939458 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830858Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
I1229 06:56:00.939467 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830936Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cbdf275f553df7c2 became candidate at term 3"}
I1229 06:56:00.939478 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832101Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgVoteResp from cbdf275f553df7c2 at term 3"}
I1229 06:56:00.939494 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832130Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgVoteResp votes and 0 vote rejections"}
I1229 06:56:00.939513 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832148Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cbdf275f553df7c2 became leader at term 3"}
I1229 06:56:00.939528 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832166Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cbdf275f553df7c2 elected leader cbdf275f553df7c2 at term 3"}
I1229 06:56:00.939544 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835446Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:00.939564 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835384Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cbdf275f553df7c2","local-member-attributes":"{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}","cluster-id":"6f38b6947d3f1f22","publish-timeout":"7s"}
I1229 06:56:00.939586 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839145Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:00.939603 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839733Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:00.939616 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851017Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
I1229 06:56:00.939882 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851067Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
I1229 06:56:00.939915 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851748Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:00.939932 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.856729Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
I1229 06:56:00.939947 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.869216Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.121:2379"}
I1229 06:56:00.939960 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706108Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
I1229 06:56:00.939998 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706269Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:56:00.940030 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:51.706381Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:56:00.940064 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.707655Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:56:00.940122 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.709799Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:00.940150 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.709913Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"cbdf275f553df7c2","current-leader-member-id":"cbdf275f553df7c2"}
I1229 06:56:00.940167 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710255Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
I1229 06:56:00.940187 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710690Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:56:00.940204 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710782Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:56:00.940257 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.710832Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:00.940277 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710742Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
I1229 06:56:00.940301 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711002Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:56:00.940334 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711035Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:56:00.940371 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.711045Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:00.940389 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717452Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:00.940425 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.717551Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:00.940447 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717601Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:00.940473 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717654Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:56:00.955065 17440 out.go:374] Setting ErrFile to fd 2...
I1229 06:56:00.955108 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
W1229 06:56:00.955188 17440 out.go:285] X Problems detected in kube-apiserver [fb6db97d8ffe]:
X Problems detected in kube-apiserver [fb6db97d8ffe]:
W1229 06:56:00.955202 17440 out.go:285] E1229 06:53:50.606724 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
E1229 06:53:50.606724 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
I1229 06:56:00.955209 17440 out.go:374] Setting ErrFile to fd 2...
I1229 06:56:00.955215 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1229 06:56:10.957344 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:56:15.961183 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:56:15.961319 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
I1229 06:56:15.981705 17440 command_runner.go:130] > 18d0015c724a
I1229 06:56:15.982641 17440 logs.go:282] 1 containers: [18d0015c724a]
I1229 06:56:15.982732 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
I1229 06:56:16.002259 17440 command_runner.go:130] > 6b7711ee25a2
I1229 06:56:16.002292 17440 command_runner.go:130] > d81259f64136
I1229 06:56:16.002322 17440 logs.go:282] 2 containers: [6b7711ee25a2 d81259f64136]
I1229 06:56:16.002399 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
I1229 06:56:16.021992 17440 command_runner.go:130] > 6f69ba6a1553
I1229 06:56:16.022032 17440 logs.go:282] 1 containers: [6f69ba6a1553]
I1229 06:56:16.022113 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
I1229 06:56:16.048104 17440 command_runner.go:130] > 4d49952084c9
I1229 06:56:16.048133 17440 command_runner.go:130] > a79d99ad3fde
I1229 06:56:16.049355 17440 logs.go:282] 2 containers: [4d49952084c9 a79d99ad3fde]
I1229 06:56:16.049441 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
I1229 06:56:16.071523 17440 command_runner.go:130] > 8911777281f4
I1229 06:56:16.072578 17440 logs.go:282] 1 containers: [8911777281f4]
I1229 06:56:16.072668 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
I1229 06:56:16.092921 17440 command_runner.go:130] > f48fc04e3475
I1229 06:56:16.092948 17440 command_runner.go:130] > 17fe16a2822a
I1229 06:56:16.092975 17440 logs.go:282] 2 containers: [f48fc04e3475 17fe16a2822a]
I1229 06:56:16.093047 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
I1229 06:56:16.113949 17440 logs.go:282] 0 containers: []
W1229 06:56:16.113983 17440 logs.go:284] No container was found matching "kindnet"
I1229 06:56:16.114047 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
I1229 06:56:16.135700 17440 command_runner.go:130] > bd96b57aa9fc
I1229 06:56:16.135739 17440 logs.go:282] 1 containers: [bd96b57aa9fc]
I1229 06:56:16.135766 17440 logs.go:123] Gathering logs for dmesg ...
I1229 06:56:16.135786 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1229 06:56:16.152008 17440 command_runner.go:130] > [Dec29 06:52] Booted with the nomodeset parameter. Only the system framebuffer will be available
I1229 06:56:16.152038 17440 command_runner.go:130] > [ +0.000007] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
I1229 06:56:16.152046 17440 command_runner.go:130] > [ +0.000088] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
I1229 06:56:16.152054 17440 command_runner.go:130] > [ +0.005634] (rpcbind)[119]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
I1229 06:56:16.152063 17440 command_runner.go:130] > [ +0.202886] crun[405]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set
I1229 06:56:16.152069 17440 command_runner.go:130] > [ +0.971059] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
I1229 06:56:16.152076 17440 command_runner.go:130] > [ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
I1229 06:56:16.152081 17440 command_runner.go:130] > [ +0.000001] NFSD: Unable to initialize client recovery tracking! (-2)
I1229 06:56:16.152086 17440 command_runner.go:130] > [ +0.268875] kauditd_printk_skb: 32 callbacks suppressed
I1229 06:56:16.152091 17440 command_runner.go:130] > [ +0.123569] kauditd_printk_skb: 342 callbacks suppressed
I1229 06:56:16.152096 17440 command_runner.go:130] > [ +0.099711] kauditd_printk_skb: 205 callbacks suppressed
I1229 06:56:16.152102 17440 command_runner.go:130] > [ +0.170782] kauditd_printk_skb: 221 callbacks suppressed
I1229 06:56:16.152107 17440 command_runner.go:130] > [ +0.199839] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:56:16.152112 17440 command_runner.go:130] > [ +0.025660] kauditd_printk_skb: 318 callbacks suppressed
I1229 06:56:16.152119 17440 command_runner.go:130] > [Dec29 06:53] kauditd_printk_skb: 19 callbacks suppressed
I1229 06:56:16.152128 17440 command_runner.go:130] > [ +15.204939] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:56:16.152148 17440 command_runner.go:130] > [ +0.333829] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:56:16.152164 17440 command_runner.go:130] > [ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:56:16.152180 17440 command_runner.go:130] > [ +4.976278] kauditd_printk_skb: 42 callbacks suppressed
I1229 06:56:16.152190 17440 command_runner.go:130] > [ +0.830497] kauditd_printk_skb: 396 callbacks suppressed
I1229 06:56:16.152201 17440 command_runner.go:130] > [ +5.294312] kauditd_printk_skb: 231 callbacks suppressed
I1229 06:56:16.152209 17440 command_runner.go:130] > [Dec29 06:56] kauditd_printk_skb: 36 callbacks suppressed
I1229 06:56:16.152217 17440 command_runner.go:130] > [ +10.952068] kauditd_printk_skb: 66 callbacks suppressed
I1229 06:56:16.153163 17440 logs.go:123] Gathering logs for kube-proxy [8911777281f4] ...
I1229 06:56:16.153192 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 8911777281f4"
I1229 06:56:16.174824 17440 command_runner.go:130] ! I1229 06:52:47.703648 1 server_linux.go:53] "Using iptables proxy"
I1229 06:56:16.174856 17440 command_runner.go:130] ! I1229 06:52:47.791676 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:56:16.174862 17440 command_runner.go:130] ! I1229 06:52:47.897173 1 shared_informer.go:377] "Caches are synced"
I1229 06:56:16.174873 17440 command_runner.go:130] ! I1229 06:52:47.900073 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.39.121"]
I1229 06:56:16.174892 17440 command_runner.go:130] ! E1229 06:52:47.906310 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1229 06:56:16.174900 17440 command_runner.go:130] ! I1229 06:52:48.206121 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
I1229 06:56:16.174913 17440 command_runner.go:130] ! error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
I1229 06:56:16.174920 17440 command_runner.go:130] ! Perhaps ip6tables or your kernel needs to be upgraded.
I1229 06:56:16.174924 17440 command_runner.go:130] ! >
I1229 06:56:16.174931 17440 command_runner.go:130] ! I1229 06:52:48.209509 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1229 06:56:16.174941 17440 command_runner.go:130] ! I1229 06:52:48.210145 1 server_linux.go:136] "Using iptables Proxier"
I1229 06:56:16.174957 17440 command_runner.go:130] ! I1229 06:52:48.253805 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1229 06:56:16.174966 17440 command_runner.go:130] ! I1229 06:52:48.255046 1 server.go:529] "Version info" version="v1.35.0"
I1229 06:56:16.174975 17440 command_runner.go:130] ! I1229 06:52:48.255076 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:16.174985 17440 command_runner.go:130] ! I1229 06:52:48.262205 1 config.go:200] "Starting service config controller"
I1229 06:56:16.174994 17440 command_runner.go:130] ! I1229 06:52:48.262238 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1229 06:56:16.175003 17440 command_runner.go:130] ! I1229 06:52:48.262258 1 config.go:106] "Starting endpoint slice config controller"
I1229 06:56:16.175012 17440 command_runner.go:130] ! I1229 06:52:48.262261 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1229 06:56:16.175024 17440 command_runner.go:130] ! I1229 06:52:48.262278 1 config.go:403] "Starting serviceCIDR config controller"
I1229 06:56:16.175033 17440 command_runner.go:130] ! I1229 06:52:48.262282 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1229 06:56:16.175040 17440 command_runner.go:130] ! I1229 06:52:48.270608 1 config.go:309] "Starting node config controller"
I1229 06:56:16.175050 17440 command_runner.go:130] ! I1229 06:52:48.271311 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1229 06:56:16.175074 17440 command_runner.go:130] ! I1229 06:52:48.271337 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1229 06:56:16.175325 17440 command_runner.go:130] ! I1229 06:52:48.363324 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1229 06:56:16.175351 17440 command_runner.go:130] ! I1229 06:52:48.363427 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1229 06:56:16.175362 17440 command_runner.go:130] ! I1229 06:52:48.363671 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1229 06:56:16.177120 17440 logs.go:123] Gathering logs for container status ...
I1229 06:56:16.177144 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1229 06:56:16.222627 17440 command_runner.go:130] > CONTAINER       IMAGE           CREATED           STATE     NAME                      ATTEMPT   POD ID          POD                                          NAMESPACE
I1229 06:56:16.222665 17440 command_runner.go:130] > 18d0015c724a8   5c6acd67e9cd1   5 seconds ago     Exited    kube-apiserver            3         d3819cc8ab802   kube-apiserver-functional-695625             kube-system
I1229 06:56:16.222684 17440 command_runner.go:130] > f48fc04e34751   2c9a4b058bd7e   16 seconds ago    Running   kube-controller-manager   2         0a96e34d38f8c   kube-controller-manager-functional-695625    kube-system
I1229 06:56:16.222707 17440 command_runner.go:130] > 6b7711ee25a2d   0a108f7189562   16 seconds ago    Running   etcd                      2         173054afc2f39   etcd-functional-695625                       kube-system
I1229 06:56:16.222730 17440 command_runner.go:130] > 4d49952084c92   550794e3b12ac   2 minutes ago     Running   kube-scheduler            2         fefef7c5591ea   kube-scheduler-functional-695625             kube-system
I1229 06:56:16.222749 17440 command_runner.go:130] > 6f69ba6a1553a   aa5e3ebc0dfed   2 minutes ago     Exited    coredns                   1         a014f32abcd01   coredns-7d764666f9-wfq7m                     kube-system
I1229 06:56:16.222768 17440 command_runner.go:130] > d81259f64136c   0a108f7189562   2 minutes ago     Exited    etcd                      1         1fc5fa7d92959   etcd-functional-695625                       kube-system
I1229 06:56:16.222810 17440 command_runner.go:130] > 17fe16a2822a8   2c9a4b058bd7e   2 minutes ago     Exited    kube-controller-manager   1         98261fa185f6e   kube-controller-manager-functional-695625    kube-system
I1229 06:56:16.222831 17440 command_runner.go:130] > a79d99ad3fde3   550794e3b12ac   2 minutes ago     Exited    kube-scheduler            1         b046056ff071b   kube-scheduler-functional-695625             kube-system
I1229 06:56:16.222851 17440 command_runner.go:130] > bd96b57aa9fce   6e38f40d628db   3 minutes ago     Exited    storage-provisioner       0         64853b50a6c5e   storage-provisioner                          kube-system
I1229 06:56:16.222879 17440 command_runner.go:130] > 8911777281f41   32652ff1bbe6b   3 minutes ago     Exited    kube-proxy                0         548561c7ada8f   kube-proxy-g7lp9                             kube-system
I1229 06:56:16.225409 17440 logs.go:123] Gathering logs for etcd [6b7711ee25a2] ...
I1229 06:56:16.225439 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 6b7711ee25a2"
I1229 06:56:16.247416 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.924768Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:16.247449 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.925193Z","caller":"etcdmain/config.go:270","msg":"--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7."}
I1229 06:56:16.247516 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925252Z","caller":"etcdmain/etcd.go:64","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.39.121:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--feature-gates=InitialCorruptCheck=true","--initial-advertise-peer-urls=https://192.168.39.121:2380","--initial-cluster=functional-695625=https://192.168.39.121:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.39.121:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.39.121:2380","--name=functional-695625","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--snapshot-count=10000","--trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--watch-progress-notify-interval=5s"]}
I1229 06:56:16.247533 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925487Z","caller":"etcdmain/etcd.go:107","msg":"server has already been initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
I1229 06:56:16.247545 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.925602Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:16.247555 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925710Z","caller":"embed/etcd.go:138","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.39.121:2380"]}
I1229 06:56:16.247582 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925810Z","caller":"embed/etcd.go:544","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:16.247605 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.934471Z","caller":"embed/etcd.go:146","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"]}
I1229 06:56:16.247698 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.935217Z","caller":"embed/etcd.go:323","msg":"starting an etcd server","etcd-version":"3.6.6","git-sha":"d2809cf","go-version":"go1.24.10","go-os":"linux","go-arch":"amd64","max-cpu-set":2,"max-cpu-available":2,"member-initialized":true,"name":"functional-695625","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"experimental-local-address":"","cors":["*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"feature-gates":"InitialCorruptCheck=true","initial-corrupt-check":false,"corrupt-check-time-interval":"0s","compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","discovery-token":"","discovery-endpoints":"","discovery-dial-timeout":"2s","discovery-request-timeout":"5s","discovery-keepalive-time":"2s","discovery-keepalive-timeout":"6s","discovery-insecure-transport":true,"discovery-insecure-skip-tls-verify":false,"discovery-cert":"","discovery-key":"","discovery-cacert":"","discovery-user":"","downgrade-check-interval":"5s","max-learners":1,"v2-deprecation":"write-only"}
I1229 06:56:16.247722 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.937503Z","logger":"bbolt","caller":"backend/backend.go:203","msg":"Opening db file (/var/lib/minikube/etcd/member/snap/db) with mode -rw------- and with options: {Timeout: 0s, NoGrowSync: false, NoFreelistSync: true, PreLoadFreelist: false, FreelistType: hashmap, ReadOnly: false, MmapFlags: 8000, InitialMmapSize: 10737418240, PageSize: 0, NoSync: false, OpenFile: 0x0, Mlock: false, Logger: 0xc000068080}"}
I1229 06:56:16.247733 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.940423Z","logger":"bbolt","caller":"bbolt@v1.4.3/db.go:321","msg":"Opening bbolt db (/var/lib/minikube/etcd/member/snap/db) successfully"}
I1229 06:56:16.247745 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.940850Z","caller":"storage/backend.go:80","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"3.479356ms"}
I1229 06:56:16.247753 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.941120Z","caller":"etcdserver/bootstrap.go:220","msg":"restore consistentIndex","index":499}
I1229 06:56:16.247762 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945006Z","caller":"etcdserver/bootstrap.go:441","msg":"No snapshot found. Recovering WAL from scratch!"}
I1229 06:56:16.247774 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945707Z","caller":"etcdserver/bootstrap.go:232","msg":"recovered v3 backend","backend-size-bytes":1019904,"backend-size":"1.0 MB","backend-size-in-use-bytes":999424,"backend-size-in-use":"999 kB"}
I1229 06:56:16.247782 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945966Z","caller":"etcdserver/bootstrap.go:90","msg":"Bootstrapping WAL from snapshot"}
I1229 06:56:16.247807 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.951906Z","caller":"etcdserver/bootstrap.go:599","msg":"restarting local member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","commit-index":499}
I1229 06:56:16.247816 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952063Z","caller":"etcdserver/bootstrap.go:94","msg":"bootstrapping cluster"}
I1229 06:56:16.247825 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952160Z","caller":"etcdserver/bootstrap.go:101","msg":"bootstrapping storage"}
I1229 06:56:16.247840 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952338Z","caller":"membership/cluster.go:605","msg":"Detected member only in v3store but missing in v2store","member":"{ID:cbdf275f553df7c2 RaftAttributes:{PeerURLs:[https://192.168.39.121:2380] IsLearner:false} Attributes:{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}}"}
I1229 06:56:16.247851 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952385Z","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.6"}
I1229 06:56:16.247867 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952396Z","caller":"membership/cluster.go:297","msg":"recovered/added member from store","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","recovered-remote-peer-id":"cbdf275f553df7c2","recovered-remote-peer-urls":["https://192.168.39.121:2380"],"recovered-remote-peer-is-learner":false}
I1229 06:56:16.247878 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952406Z","caller":"membership/cluster.go:307","msg":"set cluster version from store","cluster-version":"3.6"}
I1229 06:56:16.247886 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952416Z","caller":"etcdserver/bootstrap.go:109","msg":"bootstrapping raft"}
I1229 06:56:16.247893 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952460Z","caller":"etcdserver/server.go:312","msg":"bootstrap successfully"}
I1229 06:56:16.247902 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952535Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=()"}
I1229 06:56:16.247914 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952604Z","logger":"raft","caller":"v3@v3.6.0/raft.go:897","msg":"cbdf275f553df7c2 became follower at term 3"}
I1229 06:56:16.247924 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952619Z","logger":"raft","caller":"v3@v3.6.0/raft.go:493","msg":"newRaft cbdf275f553df7c2 [peers: [], term: 3, commit: 499, applied: 0, lastindex: 499, lastterm: 3]"}
I1229 06:56:16.247935 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.955095Z","caller":"auth/store.go:1135","msg":"simple token is not cryptographically signed"}
I1229 06:56:16.247952 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.961356Z","caller":"mvcc/kvstore.go:408","msg":"kvstore restored","current-rev":465}
I1229 06:56:16.247965 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.967658Z","caller":"storage/quota.go:93","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
I1229 06:56:16.247975 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.968487Z","caller":"etcdserver/corrupt.go:91","msg":"starting initial corruption check","local-member-id":"cbdf275f553df7c2","timeout":"7s"}
I1229 06:56:16.247988 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969020Z","caller":"etcdserver/corrupt.go:172","msg":"initial corruption checking passed; no corruption","local-member-id":"cbdf275f553df7c2"}
I1229 06:56:16.248000 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969260Z","caller":"etcdserver/server.go:589","msg":"starting etcd server","local-member-id":"cbdf275f553df7c2","local-server-version":"3.6.6","cluster-id":"6f38b6947d3f1f22","cluster-version":"3.6"}
I1229 06:56:16.248016 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969708Z","caller":"etcdserver/server.go:483","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"cbdf275f553df7c2","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
I1229 06:56:16.248035 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970043Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:16.248063 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970828Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cbdf275f553df7c2","initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
I1229 06:56:16.248074 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971046Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
I1229 06:56:16.248083 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970057Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:16.248092 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971258Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.121:2380"}
I1229 06:56:16.248103 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970152Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
I1229 06:56:16.248113 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971336Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
I1229 06:56:16.248126 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971370Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
I1229 06:56:16.248136 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970393Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=(14690503799911348162)"}
I1229 06:56:16.248153 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.972410Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","added-peer-id":"cbdf275f553df7c2","added-peer-peer-urls":["https://192.168.39.121:2380"],"added-peer-is-learner":false}
I1229 06:56:16.248166 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.972698Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","from":"3.6","to":"3.6"}
I1229 06:56:16.248177 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353814Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cbdf275f553df7c2 is starting a new election at term 3"}
I1229 06:56:16.248186 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353865Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cbdf275f553df7c2 became pre-candidate at term 3"}
I1229 06:56:16.248198 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353992Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgPreVoteResp from cbdf275f553df7c2 at term 3"}
I1229 06:56:16.248208 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.354007Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
I1229 06:56:16.248219 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.354031Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cbdf275f553df7c2 became candidate at term 4"}
I1229 06:56:16.248228 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355853Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgVoteResp from cbdf275f553df7c2 at term 4"}
I1229 06:56:16.248240 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355920Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgVoteResp votes and 0 vote rejections"}
I1229 06:56:16.248248 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355940Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cbdf275f553df7c2 became leader at term 4"}
I1229 06:56:16.248260 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.356018Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cbdf275f553df7c2 elected leader cbdf275f553df7c2 at term 4"}
I1229 06:56:16.248275 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358237Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cbdf275f553df7c2","local-member-attributes":"{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}","cluster-id":"6f38b6947d3f1f22","publish-timeout":"7s"}
I1229 06:56:16.248287 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358323Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:16.248295 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358268Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:16.248304 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358820Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
I1229 06:56:16.248312 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358859Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
I1229 06:56:16.248320 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.360417Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:16.248331 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.360952Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:16.248341 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.363710Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
I1229 06:56:16.248352 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.363760Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.121:2379"}
I1229 06:56:16.254841 17440 logs.go:123] Gathering logs for etcd [d81259f64136] ...
I1229 06:56:16.254869 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 d81259f64136"
I1229 06:56:16.278647 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517725Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:16.278679 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517828Z","caller":"etcdmain/config.go:270","msg":"--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7."}
I1229 06:56:16.278723 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.517848Z","caller":"etcdmain/etcd.go:64","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.39.121:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--feature-gates=InitialCorruptCheck=true","--initial-advertise-peer-urls=https://192.168.39.121:2380","--initial-cluster=functional-695625=https://192.168.39.121:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.39.121:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.39.121:2380","--name=functional-695625","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--snapshot-count=10000","--trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--watch-progress-notify-interval=5s"]}
I1229 06:56:16.278736 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519323Z","caller":"etcdmain/etcd.go:107","msg":"server has already been initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
I1229 06:56:16.278750 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.519372Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:16.278759 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519700Z","caller":"embed/etcd.go:138","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.39.121:2380"]}
I1229 06:56:16.278780 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.522332Z","caller":"embed/etcd.go:544","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:16.278809 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.530852Z","caller":"embed/etcd.go:146","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"]}
I1229 06:56:16.278890 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.531312Z","caller":"embed/etcd.go:323","msg":"starting an etcd server","etcd-version":"3.6.6","git-sha":"d2809cf","go-version":"go1.24.10","go-os":"linux","go-arch":"amd64","max-cpu-set":2,"max-cpu-available":2,"member-initialized":true,"name":"functional-695625","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"experimental-local-address":"","cors":["*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"feature-gates":"InitialCorruptCheck=true","initial-corrupt-check":false,"corrupt-check-time-interval":"0s","compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","discovery-token":"","discovery-endpoints":"","discovery-dial-timeout":"2s","discovery-request-timeout":"5s","discovery-keepalive-time":"2s","discovery-keepalive-timeout":"6s","discovery-insecure-transport":true,"discovery-insecure-skip-tls-verify":false,"discovery-cert":"","discovery-key":"","discovery-cacert":"","discovery-user":"","downgrade-check-interval":"5s","max-learners":1,"v2-deprecation":"write-only"}
I1229 06:56:16.278913 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.533505Z","logger":"bbolt","caller":"backend/backend.go:203","msg":"Opening db file (/var/lib/minikube/etcd/member/snap/db) with mode -rw------- and with options: {Timeout: 0s, NoGrowSync: false, NoFreelistSync: true, PreLoadFreelist: false, FreelistType: hashmap, ReadOnly: false, MmapFlags: 8000, InitialMmapSize: 10737418240, PageSize: 0, NoSync: false, OpenFile: 0x0, Mlock: false, Logger: 0xc00006a930}"}
I1229 06:56:16.278923 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.562961Z","logger":"bbolt","caller":"bbolt@v1.4.3/db.go:321","msg":"Opening bbolt db (/var/lib/minikube/etcd/member/snap/db) successfully"}
I1229 06:56:16.278935 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.566967Z","caller":"storage/backend.go:80","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"33.344174ms"}
I1229 06:56:16.278946 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.569353Z","caller":"etcdserver/bootstrap.go:220","msg":"restore consistentIndex","index":497}
I1229 06:56:16.278957 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596637Z","caller":"etcdserver/bootstrap.go:441","msg":"No snapshot found. Recovering WAL from scratch!"}
I1229 06:56:16.278971 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596694Z","caller":"etcdserver/bootstrap.go:232","msg":"recovered v3 backend","backend-size-bytes":1019904,"backend-size":"1.0 MB","backend-size-in-use-bytes":999424,"backend-size-in-use":"999 kB"}
I1229 06:56:16.278982 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596795Z","caller":"etcdserver/bootstrap.go:90","msg":"Bootstrapping WAL from snapshot"}
I1229 06:56:16.278996 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.620855Z","caller":"etcdserver/bootstrap.go:599","msg":"restarting local member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","commit-index":497}
I1229 06:56:16.279006 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.621587Z","caller":"etcdserver/bootstrap.go:94","msg":"bootstrapping cluster"}
I1229 06:56:16.279014 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624518Z","caller":"etcdserver/bootstrap.go:101","msg":"bootstrapping storage"}
I1229 06:56:16.279031 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624664Z","caller":"membership/cluster.go:605","msg":"Detected member only in v3store but missing in v2store","member":"{ID:cbdf275f553df7c2 RaftAttributes:{PeerURLs:[https://192.168.39.121:2380] IsLearner:false} Attributes:{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}}"}
I1229 06:56:16.279040 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624700Z","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.6"}
I1229 06:56:16.279072 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624712Z","caller":"membership/cluster.go:297","msg":"recovered/added member from store","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","recovered-remote-peer-id":"cbdf275f553df7c2","recovered-remote-peer-urls":["https://192.168.39.121:2380"],"recovered-remote-peer-is-learner":false}
I1229 06:56:16.279083 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624720Z","caller":"membership/cluster.go:307","msg":"set cluster version from store","cluster-version":"3.6"}
I1229 06:56:16.279091 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624728Z","caller":"etcdserver/bootstrap.go:109","msg":"bootstrapping raft"}
I1229 06:56:16.279101 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624764Z","caller":"etcdserver/server.go:312","msg":"bootstrap successfully"}
I1229 06:56:16.279110 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624799Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=()"}
I1229 06:56:16.279121 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624856Z","logger":"raft","caller":"v3@v3.6.0/raft.go:897","msg":"cbdf275f553df7c2 became follower at term 2"}
I1229 06:56:16.279132 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624867Z","logger":"raft","caller":"v3@v3.6.0/raft.go:493","msg":"newRaft cbdf275f553df7c2 [peers: [], term: 2, commit: 497, applied: 0, lastindex: 497, lastterm: 2]"}
I1229 06:56:16.279142 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.634002Z","caller":"auth/store.go:1135","msg":"simple token is not cryptographically signed"}
I1229 06:56:16.279159 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.644772Z","caller":"mvcc/kvstore.go:408","msg":"kvstore restored","current-rev":465}
I1229 06:56:16.279173 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.681530Z","caller":"storage/quota.go:93","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
I1229 06:56:16.279183 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686046Z","caller":"etcdserver/corrupt.go:91","msg":"starting initial corruption check","local-member-id":"cbdf275f553df7c2","timeout":"7s"}
I1229 06:56:16.279195 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686350Z","caller":"etcdserver/corrupt.go:172","msg":"initial corruption checking passed; no corruption","local-member-id":"cbdf275f553df7c2"}
I1229 06:56:16.279208 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686391Z","caller":"etcdserver/server.go:589","msg":"starting etcd server","local-member-id":"cbdf275f553df7c2","local-server-version":"3.6.6","cluster-id":"6f38b6947d3f1f22","cluster-version":"3.6"}
I1229 06:56:16.279226 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687141Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:16.279249 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687399Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cbdf275f553df7c2","initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
I1229 06:56:16.279260 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687425Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
I1229 06:56:16.279275 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687475Z","caller":"etcdserver/server.go:483","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"cbdf275f553df7c2","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
I1229 06:56:16.279289 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687536Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
I1229 06:56:16.279300 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687564Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
I1229 06:56:16.279313 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687571Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
I1229 06:56:16.279322 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687702Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:16.279332 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687713Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.121:2380"}
I1229 06:56:16.279343 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692750Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=(14690503799911348162)"}
I1229 06:56:16.279359 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692847Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","added-peer-id":"cbdf275f553df7c2","added-peer-peer-urls":["https://192.168.39.121:2380"],"added-peer-is-learner":false}
I1229 06:56:16.279374 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.694703Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","from":"3.6","to":"3.6"}
I1229 06:56:16.279386 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830725Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cbdf275f553df7c2 is starting a new election at term 2"}
I1229 06:56:16.279396 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830765Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cbdf275f553df7c2 became pre-candidate at term 2"}
I1229 06:56:16.279406 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830844Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgPreVoteResp from cbdf275f553df7c2 at term 2"}
I1229 06:56:16.279418 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830858Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
I1229 06:56:16.279429 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830936Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cbdf275f553df7c2 became candidate at term 3"}
I1229 06:56:16.279439 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832101Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgVoteResp from cbdf275f553df7c2 at term 3"}
I1229 06:56:16.279451 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832130Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgVoteResp votes and 0 vote rejections"}
I1229 06:56:16.279460 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832148Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cbdf275f553df7c2 became leader at term 3"}
I1229 06:56:16.279469 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832166Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cbdf275f553df7c2 elected leader cbdf275f553df7c2 at term 3"}
I1229 06:56:16.279479 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835446Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:16.279494 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835384Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cbdf275f553df7c2","local-member-attributes":"{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}","cluster-id":"6f38b6947d3f1f22","publish-timeout":"7s"}
I1229 06:56:16.279503 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839145Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:16.279513 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839733Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:16.279523 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851017Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
I1229 06:56:16.279531 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851067Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
I1229 06:56:16.279541 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851748Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:16.279551 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.856729Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
I1229 06:56:16.279562 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.869216Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.121:2379"}
I1229 06:56:16.279570 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706108Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
I1229 06:56:16.279585 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706269Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:56:16.279603 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:51.706381Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:56:16.279622 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.707655Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:56:16.279661 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.709799Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:16.279676 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.709913Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"cbdf275f553df7c2","current-leader-member-id":"cbdf275f553df7c2"}
I1229 06:56:16.279688 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710255Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
I1229 06:56:16.279698 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710690Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:56:16.279711 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710782Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:56:16.279730 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.710832Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:16.279741 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710742Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
I1229 06:56:16.279751 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711002Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:56:16.279764 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711035Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:56:16.279785 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.711045Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:16.279805 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717452Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:16.279825 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.717551Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:16.279836 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717601Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:16.279852 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717654Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:56:16.287590 17440 logs.go:123] Gathering logs for coredns [6f69ba6a1553] ...
I1229 06:56:16.287613 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 6f69ba6a1553"
I1229 06:56:16.310292 17440 logs.go:123] Gathering logs for storage-provisioner [bd96b57aa9fc] ...
I1229 06:56:16.310320 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 bd96b57aa9fc"
I1229 06:56:16.331009 17440 command_runner.go:130] ! I1229 06:52:48.539098 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1229 06:56:16.331044 17440 command_runner.go:130] ! I1229 06:52:48.550309 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1229 06:56:16.331054 17440 command_runner.go:130] ! I1229 06:52:48.550373 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1229 06:56:16.331067 17440 command_runner.go:130] ! W1229 06:52:48.552935 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331076 17440 command_runner.go:130] ! W1229 06:52:48.563735 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331083 17440 command_runner.go:130] ! I1229 06:52:48.564362 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1229 06:56:16.331093 17440 command_runner.go:130] ! I1229 06:52:48.565422 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:56:16.331114 17440 command_runner.go:130] ! I1229 06:52:48.565143 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"cfb65e16-c2f7-4c19-a059-8ef64f8f3f2e", APIVersion:"v1", ResourceVersion:"388", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868 became leader
I1229 06:56:16.331232 17440 command_runner.go:130] ! W1229 06:52:48.576668 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331256 17440 command_runner.go:130] ! W1229 06:52:48.582743 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331268 17440 command_runner.go:130] ! I1229 06:52:48.665711 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:56:16.331275 17440 command_runner.go:130] ! W1229 06:52:50.588204 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331289 17440 command_runner.go:130] ! W1229 06:52:50.593815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331298 17440 command_runner.go:130] ! W1229 06:52:52.597431 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331316 17440 command_runner.go:130] ! W1229 06:52:52.602815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331329 17440 command_runner.go:130] ! W1229 06:52:54.606663 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331341 17440 command_runner.go:130] ! W1229 06:52:54.612650 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331355 17440 command_runner.go:130] ! W1229 06:52:56.616395 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331363 17440 command_runner.go:130] ! W1229 06:52:56.622404 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331374 17440 command_runner.go:130] ! W1229 06:52:58.626804 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331386 17440 command_runner.go:130] ! W1229 06:52:58.637257 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331400 17440 command_runner.go:130] ! W1229 06:53:00.640728 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331413 17440 command_runner.go:130] ! W1229 06:53:00.646446 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331425 17440 command_runner.go:130] ! W1229 06:53:02.650659 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331441 17440 command_runner.go:130] ! W1229 06:53:02.660374 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331454 17440 command_runner.go:130] ! W1229 06:53:04.664091 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331468 17440 command_runner.go:130] ! W1229 06:53:04.669806 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331478 17440 command_runner.go:130] ! W1229 06:53:06.674203 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331488 17440 command_runner.go:130] ! W1229 06:53:06.680002 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331496 17440 command_runner.go:130] ! W1229 06:53:08.683483 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331506 17440 command_runner.go:130] ! W1229 06:53:08.688934 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331519 17440 command_runner.go:130] ! W1229 06:53:10.693644 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331529 17440 command_runner.go:130] ! W1229 06:53:10.706122 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331537 17440 command_runner.go:130] ! W1229 06:53:12.709949 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331547 17440 command_runner.go:130] ! W1229 06:53:12.715753 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331555 17440 command_runner.go:130] ! W1229 06:53:14.719191 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331564 17440 command_runner.go:130] ! W1229 06:53:14.728100 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331572 17440 command_runner.go:130] ! W1229 06:53:16.731658 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331580 17440 command_runner.go:130] ! W1229 06:53:16.737463 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331592 17440 command_runner.go:130] ! W1229 06:53:18.741304 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331604 17440 command_runner.go:130] ! W1229 06:53:18.746708 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331618 17440 command_runner.go:130] ! W1229 06:53:20.749662 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331629 17440 command_runner.go:130] ! W1229 06:53:20.755989 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331645 17440 command_runner.go:130] ! W1229 06:53:22.760815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331659 17440 command_runner.go:130] ! W1229 06:53:22.772421 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331673 17440 command_runner.go:130] ! W1229 06:53:24.776403 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331689 17440 command_runner.go:130] ! W1229 06:53:24.783232 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331703 17440 command_runner.go:130] ! W1229 06:53:26.786665 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331716 17440 command_runner.go:130] ! W1229 06:53:26.792239 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331728 17440 command_runner.go:130] ! W1229 06:53:28.796420 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331740 17440 command_runner.go:130] ! W1229 06:53:28.805511 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331756 17440 command_runner.go:130] ! W1229 06:53:30.808544 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331771 17440 command_runner.go:130] ! W1229 06:53:30.816066 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331784 17440 command_runner.go:130] ! W1229 06:53:32.820090 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331816 17440 command_runner.go:130] ! W1229 06:53:32.826208 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331830 17440 command_runner.go:130] ! W1229 06:53:34.829865 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331847 17440 command_runner.go:130] ! W1229 06:53:34.835774 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331863 17440 command_runner.go:130] ! W1229 06:53:36.839291 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331879 17440 command_runner.go:130] ! W1229 06:53:36.853251 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331894 17440 command_runner.go:130] ! W1229 06:53:38.856432 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331908 17440 command_runner.go:130] ! W1229 06:53:38.862360 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.336243 17440 logs.go:123] Gathering logs for Docker ...
I1229 06:56:16.336267 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I1229 06:56:16.358115 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:56:16.358145 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:56:16.358155 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:56:16.358165 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:56:16.358177 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=fatal msg="failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.358186 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Main process exited, code=exited, status=1/FAILURE
I1229 06:56:16.358194 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Failed with result 'exit-code'.
I1229 06:56:16.358203 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Failed to start CRI Interface for Docker Application Container Engine.
I1229 06:56:16.358209 17440 command_runner.go:130] > Dec 29 06:52:22 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:56:16.358220 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.020462163Z" level=info msg="Starting up"
I1229 06:56:16.358229 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.027928346Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:56:16.358241 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028129610Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:56:16.358254 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028144703Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:56:16.358266 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.043277940Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:56:16.358278 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.068992169Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:56:16.358285 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.093451498Z" level=info msg="Loading containers: start."
I1229 06:56:16.358307 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.245820420Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:56:16.358315 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.354124488Z" level=info msg="Loading containers: done."
I1229 06:56:16.358328 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.369556904Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:56:16.358336 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.370022229Z" level=info msg="Initializing buildkit"
I1229 06:56:16.358343 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: cri-docker.service: Scheduled restart job, restart counter is at 1.
I1229 06:56:16.358350 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:56:16.358360 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.429481151Z" level=info msg="Completed buildkit initialization"
I1229 06:56:16.358369 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437135480Z" level=info msg="Daemon has completed initialization"
I1229 06:56:16.358377 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437228150Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:56:16.358385 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437499736Z" level=info msg="API listen on /run/docker.sock"
I1229 06:56:16.358399 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437545942Z" level=info msg="API listen on [::]:2376"
I1229 06:56:16.358408 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:56:16.358415 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:56:16.358425 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:56:16.358436 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:56:16.358445 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:56:16.358455 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Hairpin mode is set to none"
I1229 06:56:16.358463 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Loaded network plugin cni"
I1229 06:56:16.358474 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:56:16.358481 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:56:16.358491 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:56:16.358500 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:56:16.358508 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:56:16.358515 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:56:16.358530 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250034276Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:56:16.358543 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250065025Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:56:16.358555 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250432086Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:56:16.358576 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250448972Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:56:16.358584 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:56:16.358593 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.004793725Z" level=info msg="Processing signal 'terminated'"
I1229 06:56:16.358604 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006609373Z" level=warning msg="Error while testing if containerd API is ready" error="Canceled: grpc: the client connection is closing"
I1229 06:56:16.358614 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006865498Z" level=info msg="Daemon shutdown complete"
I1229 06:56:16.358621 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:56:16.358628 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:56:16.358635 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:56:16.358644 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.072059214Z" level=info msg="Starting up"
I1229 06:56:16.358653 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079212056Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:56:16.358666 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079317481Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:56:16.358685 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079333267Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:56:16.358697 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.100712562Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:56:16.358707 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.111060819Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:56:16.358716 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.125644752Z" level=info msg="Loading containers: start."
I1229 06:56:16.358735 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.269806698Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:56:16.358745 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.403684326Z" level=info msg="Loading containers: done."
I1229 06:56:16.358755 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419740189Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:56:16.358763 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419840379Z" level=info msg="Initializing buildkit"
I1229 06:56:16.358805 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.440865810Z" level=info msg="Completed buildkit initialization"
I1229 06:56:16.358818 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.450796825Z" level=info msg="Daemon has completed initialization"
I1229 06:56:16.358827 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451233366Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:56:16.358837 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451367379Z" level=info msg="API listen on /run/docker.sock"
I1229 06:56:16.358847 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451393479Z" level=info msg="API listen on [::]:2376"
I1229 06:56:16.358854 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:56:16.358861 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:56:16.358867 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:56:16.358874 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:56:16.358881 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:56:16.358893 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:56:16.358904 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:56:16.358913 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:56:16.358921 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:56:16.358930 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Loaded network plugin cni"
I1229 06:56:16.358942 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:56:16.358950 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:56:16.358959 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:56:16.358970 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:56:16.358979 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:56:16.358986 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:56:16.358992 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:56:16.359001 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.014018901Z" level=info msg="Processing signal 'terminated'"
I1229 06:56:16.359011 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.015980570Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
I1229 06:56:16.359021 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.016658114Z" level=info msg="Daemon shutdown complete"
I1229 06:56:16.359029 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:56:16.359036 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:56:16.359042 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:56:16.359052 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.080172805Z" level=info msg="Starting up"
I1229 06:56:16.359060 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087153730Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:56:16.359071 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087606870Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:56:16.359084 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087791007Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:56:16.359094 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.102104328Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:56:16.359106 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.438808405Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:56:16.359113 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.712758412Z" level=info msg="Loading containers: start."
I1229 06:56:16.359135 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.850108278Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:56:16.359144 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.981771558Z" level=info msg="Loading containers: done."
I1229 06:56:16.359154 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997281457Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:56:16.359164 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997336373Z" level=info msg="Initializing buildkit"
I1229 06:56:16.359172 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.018270012Z" level=info msg="Completed buildkit initialization"
I1229 06:56:16.359182 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.027948102Z" level=info msg="Daemon has completed initialization"
I1229 06:56:16.359190 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028167710Z" level=info msg="API listen on /run/docker.sock"
I1229 06:56:16.359198 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028236879Z" level=info msg="API listen on [::]:2376"
I1229 06:56:16.359206 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028260561Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:56:16.359213 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:56:16.359244 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359260 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359275 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359288 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359300 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:44Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
I1229 06:56:16.359313 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:46Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359328 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359343 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359357 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359372 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.687270343Z" level=info msg="ignoring event" container=67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359386 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.834054505Z" level=info msg="ignoring event" container=82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359399 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154228197Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:56:16.359410 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154272599Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:56:16.359422 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154382560Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:56:16.359435 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154394909Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:56:16.359442 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:56:16.359452 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 dockerd[1647]: time="2025-12-29T06:53:25.157393741Z" level=info msg="Processing signal 'terminated'"
I1229 06:56:16.359460 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.159560262Z" level=error msg="Force shutdown daemon"
I1229 06:56:16.359468 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.160035445Z" level=info msg="Daemon shutdown complete"
I1229 06:56:16.359474 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:56:16.359481 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:56:16.359487 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Consumed 2.138s CPU time, 29.7M memory peak.
I1229 06:56:16.359494 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:56:16.359502 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.286623538Z" level=info msg="Starting up"
I1229 06:56:16.359511 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295291170Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:56:16.359521 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295480841Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:56:16.359532 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295496671Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:56:16.359544 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.316635284Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:56:16.359553 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.328807793Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:56:16.359561 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.355375449Z" level=info msg="Loading containers: start."
I1229 06:56:16.359574 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.681285713Z" level=info msg="ignoring event" container=5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359590 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.755492465Z" level=info msg="ignoring event" container=bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359602 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.762530714Z" level=info msg="ignoring event" container=64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359617 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.775670003Z" level=info msg="ignoring event" container=0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359630 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.794654459Z" level=info msg="ignoring event" container=8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359646 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.801655844Z" level=info msg="ignoring event" container=548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359660 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828715029Z" level=info msg="ignoring event" container=ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359676 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828769092Z" level=info msg="ignoring event" container=a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359689 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.845767487Z" level=info msg="ignoring event" container=abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359706 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.864343925Z" level=info msg="ignoring event" container=fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359719 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.865774071Z" level=info msg="ignoring event" container=14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359731 17440 command_runner.go:130] > Dec 29 06:53:45 functional-695625 dockerd[4014]: time="2025-12-29T06:53:45.656598076Z" level=info msg="ignoring event" container=bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359744 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.636734672Z" level=info msg="ignoring event" container=fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359763 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.811417108Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:56:16.359779 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.854503584Z" level=info msg="Removing stale sandbox" cid=a123d63a8edb isRestore=false sid=bee98e10184c
I1229 06:56:16.359800 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.857444846Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 155f23c2cd353f99747cbbed5071c374427d34acfe358ab2da9489f0ecc6dd58 20989221f5da3e18159e9875a44d6ffa354887adacc49a282cdee70b58f0dd06], retrying...."
I1229 06:56:16.359813 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.873316567Z" level=info msg="Removing stale sandbox" cid=0af491ef7c2f isRestore=false sid=043bbf7592a3
I1229 06:56:16.359827 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.875334227Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 abd499ef79857402bb9465b07e26fb0f75693045ea6a45283c4a1a4b13da7c92], retrying...."
I1229 06:56:16.359837 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.887452986Z" level=info msg="Removing stale sandbox" cid=ad82b94f7629 isRestore=false sid=4ae81a2c92d8
I1229 06:56:16.359852 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.890633879Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 ccabc3ec6c0d337909f3a6bfccd1999d5ddec500f785c46c7c1173bb9f142a4d], retrying...."
I1229 06:56:16.359864 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.896180450Z" level=info msg="Removing stale sandbox" cid=5024b03252e3 isRestore=false sid=4f7be10df8fc
I1229 06:56:16.359878 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.898438145Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 0e272d35a10e432b889f2a3f6f048225031acf42b0444ba6b0cc9339f3cb374f], retrying...."
I1229 06:56:16.359890 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.903187461Z" level=info msg="Removing stale sandbox" cid=64853b50a6c5 isRestore=false sid=826a3dc204ef
I1229 06:56:16.359904 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.905271147Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 67cd3d4378e987242bd34247eace592097542682b6c3f23a5a478422e9bfbb3b], retrying...."
I1229 06:56:16.359916 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.910152629Z" level=info msg="Removing stale sandbox" cid=548561c7ada8 isRestore=false sid=94281ce70a77
I1229 06:56:16.359932 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.911967707Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 b513626d489ab85e12802c06e57f2ac0b0298434467c73d2846152ca9481eeae], retrying...."
I1229 06:56:16.359945 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.917235829Z" level=info msg="Removing stale sandbox" cid=fe7b5da2f7fb isRestore=false sid=b5e6c523a381
I1229 06:56:16.359960 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.919265802Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 bef3c0f56e910ab0a1a698f2eb08c97229abee2b90bf53ab9119cbdba3cb6eaa], retrying...."
I1229 06:56:16.359975 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022090385Z" level=warning msg="error locating sandbox id 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3: sandbox 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3 not found"
I1229 06:56:16.359988 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022147638Z" level=warning msg="error locating sandbox id 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48: sandbox 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48 not found"
I1229 06:56:16.360003 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022162233Z" level=warning msg="error locating sandbox id 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6: sandbox 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6 not found"
I1229 06:56:16.360019 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022177741Z" level=warning msg="error locating sandbox id 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e: sandbox 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e not found"
I1229 06:56:16.360037 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022193375Z" level=warning msg="error locating sandbox id bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc: sandbox bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc not found"
I1229 06:56:16.360051 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022209936Z" level=warning msg="error locating sandbox id 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2: sandbox 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2 not found"
I1229 06:56:16.360064 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022222477Z" level=warning msg="error locating sandbox id b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0: sandbox b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0 not found"
I1229 06:56:16.360074 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022440032Z" level=info msg="Loading containers: done."
I1229 06:56:16.360085 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037242165Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:56:16.360093 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037335060Z" level=info msg="Initializing buildkit"
I1229 06:56:16.360102 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.057350643Z" level=info msg="Completed buildkit initialization"
I1229 06:56:16.360113 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.066932687Z" level=info msg="Daemon has completed initialization"
I1229 06:56:16.360121 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067089967Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:56:16.360130 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067190842Z" level=info msg="API listen on /run/docker.sock"
I1229 06:56:16.360163 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067284257Z" level=info msg="API listen on [::]:2376"
I1229 06:56:16.360172 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:56:16.360189 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:53:48Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780\""
I1229 06:56:16.360197 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:56:16.360204 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:56:16.360210 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:56:16.360218 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Consumed 1.284s CPU time, 18.5M memory peak.
I1229 06:56:16.360225 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:56:16.360236 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:56:16.360245 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:56:16.360255 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:56:16.360263 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:56:16.360271 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Loaded network plugin cni"
I1229 06:56:16.360280 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:56:16.360288 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:56:16.360297 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:56:16.360308 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:56:16.360317 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:56:16.360326 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:56:16.360338 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360353 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360365 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360380 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360392 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 dockerd[4014]: time="2025-12-29T06:53:50.654005689Z" level=info msg="ignoring event" container=fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360410 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360426 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": unexpected command output nsenter: cannot open /proc/5603/ns/net: No such file or directory\n with error: exit status 1"
I1229 06:56:16.360441 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.781948864Z" level=info msg="ignoring event" container=17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360454 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.786486841Z" level=info msg="ignoring event" container=1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360467 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.807329963Z" level=info msg="ignoring event" container=b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360482 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.833907949Z" level=info msg="ignoring event" container=6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360494 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.842344727Z" level=info msg="ignoring event" container=a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360510 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.846952655Z" level=info msg="ignoring event" container=4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360525 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.855675748Z" level=info msg="ignoring event" container=98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360538 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 dockerd[4014]: time="2025-12-29T06:53:52.089998903Z" level=info msg="ignoring event" container=a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360553 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a96e34d38f8c1eccbbdf73d99dbbbe353acea505d84b69f0fdd4e54cb811123/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360566 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d3819cc8ab802e5145e47325398f1da69b88a241482842040339b6b0d609a176/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360582 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fefef7c5591ea14974a99c19d99f86c4404e25de1b446a0cd0f0bcfffa63a991/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360599 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784\""
I1229 06:56:16.360617 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a7b1e961ded554edec9d882d7f1f6093e8446ab1020c81b638de16b76de139b0/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360628 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.150956960Z" level=error msg="collecting stats for container /k8s_etcd_etcd-functional-695625_kube-system_8da5c6c8980da2ca920a502b6f312384_1: invalid id: id is empty"
I1229 06:56:16.360643 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: 2025/12/29 06:53:58 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request.(*RespWriterWrapper).writeHeader (resp_writer_wrapper.go:83)
I1229 06:56:16.360656 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.741840545Z" level=info msg="ignoring event" container=d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360671 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:59Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/173054afc2f39262ebb1466d26d5d6144bb8704054c087da601130a01d9caaf1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360682 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: W1229 06:53:59.025412 4884 logging.go:59] [core] [Server #1] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
I1229 06:56:16.360699 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:56:16.360711 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:56:16.360726 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:56:16.360736 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:56:16.360749 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 dockerd[4014]: time="2025-12-29T06:56:00.626282205Z" level=info msg="ignoring event" container=78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360762 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 dockerd[4014]: time="2025-12-29T06:56:11.553142622Z" level=info msg="ignoring event" container=18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.377860 17440 logs.go:123] Gathering logs for kubelet ...
I1229 06:56:16.377891 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1229 06:56:16.394828 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.406131 2634 kubelet_node_status.go:427] "Fast updating node status as it just became ready"
I1229 06:56:16.394877 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519501 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b64sn\" (UniqueName: \"kubernetes.io/projected/00a95e37-1394-45a7-a376-b195e31e3e9c-kube-api-access-b64sn\") pod \"coredns-7d764666f9-wfq7m\" (UID: \"00a95e37-1394-45a7-a376-b195e31e3e9c\") " pod="kube-system/coredns-7d764666f9-wfq7m"
I1229 06:56:16.394896 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519550 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/00a95e37-1394-45a7-a376-b195e31e3e9c-config-volume\") pod \"coredns-7d764666f9-wfq7m\" (UID: \"00a95e37-1394-45a7-a376-b195e31e3e9c\") " pod="kube-system/coredns-7d764666f9-wfq7m"
I1229 06:56:16.394920 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519571 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") pod \"coredns-7d764666f9-9mrnn\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") " pod="kube-system/coredns-7d764666f9-9mrnn"
I1229 06:56:16.394952 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519587 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") pod \"coredns-7d764666f9-9mrnn\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") " pod="kube-system/coredns-7d764666f9-9mrnn"
I1229 06:56:16.394976 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.411642 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605"
I1229 06:56:16.394988 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.545186 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:56:16.395012 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.731196 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/b5f201ca-6d54-4e15-9584-396fb1486f3c-tmp\") pod \"storage-provisioner\" (UID: \"b5f201ca-6d54-4e15-9584-396fb1486f3c\") " pod="kube-system/storage-provisioner"
I1229 06:56:16.395045 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.731252 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghc5d\" (UniqueName: \"kubernetes.io/projected/b5f201ca-6d54-4e15-9584-396fb1486f3c-kube-api-access-ghc5d\") pod \"storage-provisioner\" (UID: \"b5f201ca-6d54-4e15-9584-396fb1486f3c\") " pod="kube-system/storage-provisioner"
I1229 06:56:16.395075 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: E1229 06:52:48.628275 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.395109 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.634714 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/coredns-7d764666f9-9mrnn" podStartSLOduration=2.634698273 podStartE2EDuration="2.634698273s" podCreationTimestamp="2025-12-29 06:52:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.631484207 +0000 UTC m=+7.674081027" watchObservedRunningTime="2025-12-29 06:52:48.634698273 +0000 UTC m=+7.677295093"
I1229 06:56:16.395143 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: E1229 06:52:48.649761 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.395179 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.694857 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/coredns-7d764666f9-wfq7m" podStartSLOduration=2.694842541 podStartE2EDuration="2.694842541s" podCreationTimestamp="2025-12-29 06:52:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.672691157 +0000 UTC m=+7.715287974" watchObservedRunningTime="2025-12-29 06:52:48.694842541 +0000 UTC m=+7.737439360"
I1229 06:56:16.395221 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.728097 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.728082592 podStartE2EDuration="1.728082592s" podCreationTimestamp="2025-12-29 06:52:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.696376688 +0000 UTC m=+7.738973499" watchObservedRunningTime="2025-12-29 06:52:48.728082592 +0000 UTC m=+7.770679413"
I1229 06:56:16.395242 17440 command_runner.go:130] > Dec 29 06:52:49 functional-695625 kubelet[2634]: E1229 06:52:49.674249 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.395263 17440 command_runner.go:130] > Dec 29 06:52:50 functional-695625 kubelet[2634]: E1229 06:52:50.680852 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.395283 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.223368 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.395324 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: I1229 06:52:52.243928 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-proxy-g7lp9" podStartSLOduration=7.243911092 podStartE2EDuration="7.243911092s" podCreationTimestamp="2025-12-29 06:52:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.744380777 +0000 UTC m=+7.786977597" watchObservedRunningTime="2025-12-29 06:52:52.243911092 +0000 UTC m=+11.286507895"
I1229 06:56:16.395347 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.396096 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.395368 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.693687 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.395390 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: E1229 06:52:53.390926 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.395423 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979173 2634 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") pod \"c4313c5f-3b86-48de-8f3c-02d7e007542a\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") "
I1229 06:56:16.395451 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979225 2634 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") pod \"c4313c5f-3b86-48de-8f3c-02d7e007542a\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") "
I1229 06:56:16.395496 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979732 2634 operation_generator.go:779] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume" pod "c4313c5f-3b86-48de-8f3c-02d7e007542a" (UID: "c4313c5f-3b86-48de-8f3c-02d7e007542a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
I1229 06:56:16.395529 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.981248 2634 operation_generator.go:779] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj" pod "c4313c5f-3b86-48de-8f3c-02d7e007542a" (UID: "c4313c5f-3b86-48de-8f3c-02d7e007542a"). InnerVolumeSpecName "kube-api-access-lc5xj". PluginName "kubernetes.io/projected", VolumeGIDValue ""
I1229 06:56:16.395551 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.079447 2634 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") on node \"functional-695625\" DevicePath \"\""
I1229 06:56:16.395578 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.079521 2634 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") on node \"functional-695625\" DevicePath \"\""
I1229 06:56:16.395597 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.715729 2634 scope.go:122] "RemoveContainer" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:56:16.395618 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.756456 2634 scope.go:122] "RemoveContainer" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:56:16.395641 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: E1229 06:52:54.758451 2634 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:56:16.395678 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.758508 2634 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"} err="failed to get container status \"67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f\": rpc error: code = Unknown desc = Error response from daemon: No such container: 67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:56:16.395702 17440 command_runner.go:130] > Dec 29 06:52:55 functional-695625 kubelet[2634]: I1229 06:52:55.144582 2634 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="c4313c5f-3b86-48de-8f3c-02d7e007542a" path="/var/lib/kubelet/pods/c4313c5f-3b86-48de-8f3c-02d7e007542a/volumes"
I1229 06:56:16.395719 17440 command_runner.go:130] > Dec 29 06:52:58 functional-695625 kubelet[2634]: E1229 06:52:58.655985 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.395743 17440 command_runner.go:130] > Dec 29 06:53:20 functional-695625 kubelet[2634]: E1229 06:53:20.683378 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.395770 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913108 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.395806 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913180 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.395831 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913193 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.395859 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915141 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.395885 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915181 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.395903 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915192 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.395929 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139490 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.395956 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139600 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.395981 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139623 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396000 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139634 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396027 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917175 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396052 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917271 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396087 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917284 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396114 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918722 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396138 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918780 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396161 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918792 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396186 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139097 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.396267 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139170 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396295 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139187 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396315 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139214 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396339 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921730 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396362 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921808 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396387 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921823 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396413 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.923664 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396433 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.924161 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396458 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.924185 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396484 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139396 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.396508 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139458 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396526 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139472 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396550 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139485 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396585 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239338 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396609 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239383 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396634 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244411 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:56:16.396662 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244504 2634 container_log_manager.go:154] "Failed to rotate container logs" err="failed to list containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396687 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458139 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:56:16.396711 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458218 2634 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to list pod stats: failed to get pod or container map: failed to list all containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396739 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926377 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396763 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926435 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396786 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926447 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396821 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.994121 2634 log.go:32] "Version from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396848 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927827 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396872 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927867 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396891 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927930 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396919 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140553 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.396943 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140635 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396966 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140653 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396989 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140664 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397016 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930020 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397040 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930083 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397064 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930129 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397089 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932311 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397114 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932363 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397139 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932375 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397161 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140618 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.397187 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140679 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397211 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140697 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397233 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140709 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397256 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933321 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397281 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933382 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397307 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933393 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397330 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241324 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397358 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241391 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397387 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935649 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397424 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935930 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397450 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935948 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397477 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140389 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.397500 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140507 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397521 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140525 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397544 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140536 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397571 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937258 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397594 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937350 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397618 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937364 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397644 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939069 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397668 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939129 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397686 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939141 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397742 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139354 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.397766 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139413 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397786 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139428 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397818 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139440 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397849 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941237 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397872 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941285 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397897 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941296 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397918 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.109014 2634 kubelet.go:2691] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="6.97s"
I1229 06:56:16.397940 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.125762 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:56:16.397961 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.129855 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.397984 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.131487 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.398006 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.140438 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:56:16.398027 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.141703 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.398047 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.164789 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:56:16.398071 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.190793 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:56:16.398100 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202029 2634 kuberuntime_manager.go:1961] "PodSandboxStatus of sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd" pod="kube-system/etcd-functional-695625"
I1229 06:56:16.398122 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202077 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:56:16.398141 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202095 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:56:16.398162 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202348 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:56:16.398186 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202382 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:56:16.398209 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202394 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:56:16.398244 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202436 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-proxy-g7lp9" podUID="9c2c2ac1-7fa0-427d-b78e-ee14e169895a"
I1229 06:56:16.398272 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202695 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:56:16.398294 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202717 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"}
I1229 06:56:16.398317 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202737 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:56:16.398350 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202753 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/storage-provisioner" podUID="b5f201ca-6d54-4e15-9584-396fb1486f3c"
I1229 06:56:16.398371 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202781 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:56:16.398394 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202797 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:56:16.398413 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202829 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:56:16.398456 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203153 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.398481 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203201 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:56:16.398498 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203215 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"}
I1229 06:56:16.398525 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203229 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:56:16.398557 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203240 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:56:16.398599 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.205108 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer"
I1229 06:56:16.398632 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205291 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"ebc0dd45a3bf1e20d1e524935fd6129c\"}"
I1229 06:56:16.398661 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205358 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="ebc0dd45a3bf1e20d1e524935fd6129c"
I1229 06:56:16.398683 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205374 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:56:16.398714 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205391 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:56:16.398746 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205442 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"5079d003096e0cf8214852718da6832c\"}"
I1229 06:56:16.398769 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205472 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:56:16.398813 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205487 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.398843 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205502 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.398873 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.306369 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:56:16.398910 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.465709 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:56:16.398942 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.727775 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:56:16.398963 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.224724 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:56:16.398985 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.225054 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399007 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.239349 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:56:16.399028 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.239613 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.399052 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.260924 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:56:16.399082 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.262706 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.399104 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.271403 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:56:16.399121 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.272071 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.399145 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.486082 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399170 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.527267 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.399191 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.585714 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.399209 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.682419 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.399231 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 kubelet[2634]: E1229 06:53:51.994421 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.399253 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.009282 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399275 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.028514 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.399295 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: I1229 06:53:52.059063 2634 scope.go:122] "RemoveContainer" containerID="4b032678478a0db80f17dd1d989d5d3ad03f5c19d261d887ee8bbc80c0ef716c"
I1229 06:56:16.399309 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.061268 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.399328 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.136206 2634 controller.go:251] "Failed to update lease" err="Put \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:56:16.399366 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.348866 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:56:16.399402 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.420977 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.399416 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.083455 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:56:16.399427 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099631 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:56:16.399440 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099665 2634 scope.go:122] "RemoveContainer" containerID="14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697"
I1229 06:56:16.399454 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.099823 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399467 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.114949 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:56:16.399491 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115125 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.399517 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.115147 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:56:16.399553 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115570 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:56:16.399565 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128002 2634 scope.go:122] "RemoveContainer" containerID="abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32"
I1229 06:56:16.399576 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128620 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:56:16.399588 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.128846 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.399598 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128862 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:56:16.399618 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.129184 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.399629 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.146245 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.399640 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.149274 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:56:16.399653 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.158968 2634 scope.go:122] "RemoveContainer" containerID="bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159"
I1229 06:56:16.399671 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.483523 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:56:16.399684 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.165031 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.399694 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.165425 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:56:16.399724 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.166088 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:56:16.399741 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.177787 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.399752 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.177811 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:56:16.399771 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.178010 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:56:16.399782 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190233 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.399801 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.190259 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:56:16.399822 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190388 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.399834 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190596 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399845 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.197650 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399857 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198541 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.399866 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: I1229 06:53:55.198579 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:56:16.399885 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198854 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:56:16.399928 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.114313 2634 kubelet_node_status.go:474] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"containerRuntimeVersion\\\":\\\"docker://28.5.2\\\"}}}\" for node \"functional-695625\": Patch \"https://192.168.39.121:8441/api/v1/nodes/functional-695625/status?timeout=10s\": net/http:
request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:56:16.400087 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.650698 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.400109 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.650771 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:56:16.400130 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.651066 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.400140 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.808551 2634 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:56:16.400147 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
I1229 06:56:16.400153 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
I1229 06:56:16.400162 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
I1229 06:56:16.400169 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Consumed 2.468s CPU time, 33.6M memory peak.
I1229 06:56:16.400175 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 systemd[1]: Started kubelet: The Kubernetes Node Agent.
I1229 06:56:16.400184 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045324 6517 server.go:525] "Kubelet version" kubeletVersion="v1.35.0"
I1229 06:56:16.400193 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045661 6517 server.go:527] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:16.400201 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045707 6517 watchdog_linux.go:95] "Systemd watchdog is not enabled"
I1229 06:56:16.400213 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045732 6517 watchdog_linux.go:138] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started."
I1229 06:56:16.400222 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.046147 6517 server.go:951] "Client rotation is on, will bootstrap in background"
I1229 06:56:16.400233 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.047668 6517 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem"
I1229 06:56:16.400243 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.050807 6517 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:56:16.400253 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.062385 6517 server.go:1418] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
I1229 06:56:16.400262 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066238 6517 server.go:775] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /"
I1229 06:56:16.400272 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066279 6517 server.go:836] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false
I1229 06:56:16.400281 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066453 6517 container_manager_linux.go:272] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
I1229 06:56:16.400693 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066538 6517 container_manager_linux.go:277] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"functional-695625","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
I1229 06:56:16.400713 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066683 6517 topology_manager.go:143] "Creating topology manager with none policy"
I1229 06:56:16.400724 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066691 6517 container_manager_linux.go:308] "Creating device plugin manager"
I1229 06:56:16.400734 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066715 6517 container_manager_linux.go:317] "Creating Dynamic Resource Allocation (DRA) manager"
I1229 06:56:16.400742 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066977 6517 state_mem.go:41] "Initialized" logger="CPUManager state memory"
I1229 06:56:16.400751 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067273 6517 kubelet.go:482] "Attempting to sync node with API server"
I1229 06:56:16.400760 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067303 6517 kubelet.go:383] "Adding static pod path" path="/etc/kubernetes/manifests"
I1229 06:56:16.400768 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067321 6517 kubelet.go:394] "Adding apiserver pod source"
I1229 06:56:16.400780 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067339 6517 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
I1229 06:56:16.400812 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.071645 6517 kuberuntime_manager.go:294] "Container runtime initialized" containerRuntime="docker" version="28.5.2" apiVersion="v1"
I1229 06:56:16.400833 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072532 6517 kubelet.go:943] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled"
I1229 06:56:16.400853 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072614 6517 kubelet.go:970] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled"
I1229 06:56:16.400868 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.079617 6517 server.go:1257] "Started kubelet"
I1229 06:56:16.400877 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.087576 6517 fs_resource_analyzer.go:69] "Starting FS ResourceAnalyzer"
I1229 06:56:16.400887 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.098777 6517 scope.go:122] "RemoveContainer" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:56:16.400896 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.106373 6517 server.go:182] "Starting to listen" address="0.0.0.0" port=10250
I1229 06:56:16.400903 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.109848 6517 server.go:317] "Adding debug handlers to kubelet server"
I1229 06:56:16.400915 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117444 6517 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
I1229 06:56:16.400924 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117555 6517 server_v1.go:49] "podresources" method="list" useActivePods=true
I1229 06:56:16.400936 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117716 6517 server.go:254] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
I1229 06:56:16.400950 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.118699 6517 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key"
I1229 06:56:16.400961 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119167 6517 volume_manager.go:311] "Starting Kubelet Volume Manager"
I1229 06:56:16.400972 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.119433 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:56:16.400985 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119972 6517 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
I1229 06:56:16.400993 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.120370 6517 reconciler.go:29] "Reconciler: start to sync state"
I1229 06:56:16.401003 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.130418 6517 factory.go:223] Registration of the systemd container factory successfully
I1229 06:56:16.401016 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.131188 6517 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory
I1229 06:56:16.401027 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.141029 6517 factory.go:223] Registration of the containerd container factory successfully
I1229 06:56:16.401036 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183036 6517 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4"
I1229 06:56:16.401045 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183220 6517 status_manager.go:249] "Starting to sync pod status with apiserver"
I1229 06:56:16.401053 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183330 6517 kubelet.go:2501] "Starting kubelet main sync loop"
I1229 06:56:16.401070 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.183444 6517 kubelet.go:2525] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
I1229 06:56:16.401083 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.221428 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:56:16.401100 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.242700 6517 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:56:16.401120 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.243294 6517 image_gc_manager.go:222] "Failed to monitor images" err="get container status: runtime container status: rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:56:16.401132 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269160 6517 cpu_manager.go:225] "Starting" policy="none"
I1229 06:56:16.401141 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269189 6517 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
I1229 06:56:16.401150 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269211 6517 state_mem.go:41] "Initialized" logger="CPUManager state checkpoint.CPUManager state memory"
I1229 06:56:16.401160 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269368 6517 state_mem.go:94] "Updated default CPUSet" logger="CPUManager state checkpoint.CPUManager state memory" cpuSet=""
I1229 06:56:16.401173 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269407 6517 state_mem.go:102] "Updated CPUSet assignments" logger="CPUManager state checkpoint.CPUManager state memory" assignments={}
I1229 06:56:16.401180 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269429 6517 policy_none.go:50] "Start"
I1229 06:56:16.401189 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269440 6517 memory_manager.go:187] "Starting memorymanager" policy="None"
I1229 06:56:16.401198 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269450 6517 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint"
I1229 06:56:16.401209 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269563 6517 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint"
I1229 06:56:16.401217 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.270193 6517 policy_none.go:44] "Start"
I1229 06:56:16.401228 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.280697 6517 manager.go:525] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
I1229 06:56:16.401415 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282209 6517 eviction_manager.go:194] "Eviction manager: starting control loop"
I1229 06:56:16.401435 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282399 6517 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
I1229 06:56:16.401444 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282694 6517 plugin_manager.go:121] "Starting Kubelet Plugin Manager"
I1229 06:56:16.401456 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.285700 6517 eviction_manager.go:272] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime"
I1229 06:56:16.401467 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.286000 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.401486 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.290189 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.401508 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.296210 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.401529 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296213 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:56:16.401553 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296423 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:56:16.401575 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296509 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:56:16.401589 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296522 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:56:16.401602 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296659 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:56:16.401614 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296736 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:56:16.401628 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.298291 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.401640 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.300783 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:56:16.401653 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.307864 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:56:16.401667 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327004 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:56:16.401679 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327039 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:56:16.401693 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.337430 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:56:16.401706 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338584 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:56:16.401720 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338603 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:56:16.401733 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.339318 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.401745 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.384315 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:56:16.401762 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.121079 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="200ms"
I1229 06:56:16.401816 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.286789 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.401840 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.288099 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.401871 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.322920 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="400ms"
I1229 06:56:16.401900 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.289381 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.401920 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.724518 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="800ms"
I1229 06:56:16.401958 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.088119 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bc22bb49a default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,LastTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:56:16.401977 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.387607 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:56:16.401987 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: I1229 06:54:34.589687 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:56:16.402002 17440 command_runner.go:130] > Dec 29 06:54:40 functional-695625 kubelet[6517]: E1229 06:54:40.289653 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402019 17440 command_runner.go:130] > Dec 29 06:54:41 functional-695625 kubelet[6517]: E1229 06:54:41.525961 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="1.6s"
I1229 06:56:16.402033 17440 command_runner.go:130] > Dec 29 06:54:50 functional-695625 kubelet[6517]: E1229 06:54:50.290623 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402048 17440 command_runner.go:130] > Dec 29 06:54:53 functional-695625 kubelet[6517]: E1229 06:54:53.127043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="3.2s"
I1229 06:56:16.402065 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.123055 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:56:16.402085 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223407 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-ca-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:56:16.402107 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223452 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-usr-share-ca-certificates\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:56:16.402134 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224254 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-flexvolume-dir\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.402169 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224286 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-k8s-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.402204 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224307 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/ebc0dd45a3bf1e20d1e524935fd6129c-kubeconfig\") pod \"kube-scheduler-functional-695625\" (UID: \"ebc0dd45a3bf1e20d1e524935fd6129c\") " pod="kube-system/kube-scheduler-functional-695625"
I1229 06:56:16.402228 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224328 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-k8s-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:56:16.402250 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224346 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-ca-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.402272 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224360 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-kubeconfig\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.402294 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224377 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-usr-share-ca-certificates\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.402314 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224432 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-certs\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:56:16.402335 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224449 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-data\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:56:16.402349 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.291332 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402367 17440 command_runner.go:130] > Dec 29 06:55:06 functional-695625 kubelet[6517]: E1229 06:55:06.329330 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="6.4s"
I1229 06:56:16.402405 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.090561 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:56:16.402421 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.592540 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:56:16.402433 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: I1229 06:55:08.994308 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:56:16.402444 17440 command_runner.go:130] > Dec 29 06:55:10 functional-695625 kubelet[6517]: E1229 06:55:10.291711 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402530 17440 command_runner.go:130] > Dec 29 06:55:20 functional-695625 kubelet[6517]: E1229 06:55:20.292793 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402557 17440 command_runner.go:130] > Dec 29 06:55:22 functional-695625 kubelet[6517]: E1229 06:55:22.729733 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:56:16.402569 17440 command_runner.go:130] > Dec 29 06:55:30 functional-695625 kubelet[6517]: E1229 06:55:30.293859 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402585 17440 command_runner.go:130] > Dec 29 06:55:39 functional-695625 kubelet[6517]: E1229 06:55:39.730496 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:56:16.402600 17440 command_runner.go:130] > Dec 29 06:55:40 functional-695625 kubelet[6517]: E1229 06:55:40.294978 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402639 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.093022 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc794297 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node functional-695625 status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,LastTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:56:16.402655 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.996721 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:56:16.402666 17440 command_runner.go:130] > Dec 29 06:55:43 functional-695625 kubelet[6517]: I1229 06:55:43.798535 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:56:16.402677 17440 command_runner.go:130] > Dec 29 06:55:50 functional-695625 kubelet[6517]: E1229 06:55:50.295990 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402697 17440 command_runner.go:130] > Dec 29 06:55:56 functional-695625 kubelet[6517]: E1229 06:55:56.732252 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:56:16.402714 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.228455 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:56:16.402726 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.296294 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402737 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.339811 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:56:16.402752 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.590728 6517 scope.go:122] "RemoveContainer" containerID="d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974"
I1229 06:56:16.402917 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.596576 6517 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:56:16.402934 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.196928 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:56:16.402947 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199564 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.402959 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199638 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.402972 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.199656 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.402996 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199813 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.403011 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.211732 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403026 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.212086 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.403043 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226269 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403056 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226760 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.403070 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226846 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403082 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226932 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.403096 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240397 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403110 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.403125 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240759 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403138 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240798 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.403152 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241099 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403292 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241133 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.403310 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241440 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403325 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241482 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.403339 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: I1229 06:56:02.241498 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.403361 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241585 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.403376 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246390 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403389 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246454 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.403402 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246667 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403417 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246717 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.403428 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: I1229 06:56:03.246732 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.403450 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246832 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.403464 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.297136 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.403480 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342375 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403495 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.403506 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: I1229 06:56:10.342477 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.403636 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342670 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.403671 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593708 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403686 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593770 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.403702 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598591 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403720 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598652 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.403739 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606502 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403753 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606600 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.403767 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302101 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403780 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302675 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.403806 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302176 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403820 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302763 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.403833 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: I1229 06:56:11.302780 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.403850 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302307 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403871 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302816 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.403890 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.321043 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.403914 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.321965 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403936 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322030 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.403952 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.322044 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:56:16.403976 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322163 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.403994 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323008 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.404007 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323148 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.404022 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336021 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.404034 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336097 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.404046 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: I1229 06:56:13.336114 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:56:16.404066 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336243 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.404085 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.733654 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:56:16.404122 17440 command_runner.go:130] > Dec 29 06:56:16 functional-695625 kubelet[6517]: E1229 06:56:16.095560 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc7b9ae7 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node functional-695625 status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,LastTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:56:16.454878 17440 logs.go:123] Gathering logs for kube-scheduler [4d49952084c9] ...
I1229 06:56:16.454917 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 4d49952084c9"
I1229 06:56:16.478085 17440 command_runner.go:130] ! I1229 06:53:52.882050 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:56:16.478126 17440 command_runner.go:130] ! W1229 06:54:52.896472 1 authentication.go:397] Error looking up in-cluster authentication configuration: the server was unable to return a response in the time allotted, but may still be processing the request (get configmaps extension-apiserver-authentication)
I1229 06:56:16.478136 17440 command_runner.go:130] ! W1229 06:54:52.896499 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
I1229 06:56:16.478148 17440 command_runner.go:130] ! W1229 06:54:52.896506 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1229 06:56:16.478155 17440 command_runner.go:130] ! I1229 06:54:52.913597 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.35.0"
I1229 06:56:16.478166 17440 command_runner.go:130] ! I1229 06:54:52.913622 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:16.478175 17440 command_runner.go:130] ! I1229 06:54:52.915784 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1229 06:56:16.478185 17440 command_runner.go:130] ! I1229 06:54:52.915816 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:56:16.478194 17440 command_runner.go:130] ! I1229 06:54:52.915823 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1229 06:56:16.478203 17440 command_runner.go:130] ! I1229 06:54:52.915940 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:56:16.478825 17440 logs.go:123] Gathering logs for kube-controller-manager [f48fc04e3475] ...
I1229 06:56:16.478843 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f48fc04e3475"
I1229 06:56:16.501568 17440 command_runner.go:130] ! I1229 06:56:01.090404 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:56:16.501592 17440 command_runner.go:130] ! I1229 06:56:01.103535 1 controllermanager.go:189] "Starting" version="v1.35.0"
I1229 06:56:16.501601 17440 command_runner.go:130] ! I1229 06:56:01.103787 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:16.501610 17440 command_runner.go:130] ! I1229 06:56:01.105458 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1229 06:56:16.501623 17440 command_runner.go:130] ! I1229 06:56:01.105665 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:56:16.501630 17440 command_runner.go:130] ! I1229 06:56:01.105907 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1229 06:56:16.501636 17440 command_runner.go:130] ! I1229 06:56:01.105924 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:56:16.501982 17440 logs.go:123] Gathering logs for kube-controller-manager [17fe16a2822a] ...
I1229 06:56:16.501996 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 17fe16a2822a"
I1229 06:56:16.524487 17440 command_runner.go:130] ! I1229 06:53:51.283329 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:56:16.524514 17440 command_runner.go:130] ! I1229 06:53:51.303666 1 controllermanager.go:189] "Starting" version="v1.35.0"
I1229 06:56:16.524523 17440 command_runner.go:130] ! I1229 06:53:51.303706 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:16.524767 17440 command_runner.go:130] ! I1229 06:53:51.307865 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1229 06:56:16.524788 17440 command_runner.go:130] ! I1229 06:53:51.308287 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:56:16.524805 17440 command_runner.go:130] ! I1229 06:53:51.309479 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1229 06:56:16.524812 17440 command_runner.go:130] ! I1229 06:53:51.309545 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:56:16.526406 17440 logs.go:123] Gathering logs for describe nodes ...
I1229 06:56:16.526437 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1229 06:57:16.604286 17440 command_runner.go:130] ! Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
I1229 06:57:16.606268 17440 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (1m0.079810784s)
W1229 06:57:16.606306 17440 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
output:
** stderr **
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
** /stderr **
I1229 06:57:16.606317 17440 logs.go:123] Gathering logs for kube-apiserver [18d0015c724a] ...
I1229 06:57:16.606331 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 18d0015c724a"
I1229 06:57:16.636305 17440 command_runner.go:130] ! Error response from daemon: No such container: 18d0015c724a
W1229 06:57:16.636367 17440 logs.go:130] failed kube-apiserver [18d0015c724a]: command: /bin/bash -c "docker logs --tail 400 18d0015c724a" /bin/bash -c "docker logs --tail 400 18d0015c724a": Process exited with status 1
stdout:
stderr:
Error response from daemon: No such container: 18d0015c724a
output:
** stderr **
Error response from daemon: No such container: 18d0015c724a
** /stderr **
I1229 06:57:16.636376 17440 logs.go:123] Gathering logs for kube-scheduler [a79d99ad3fde] ...
I1229 06:57:16.636391 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a79d99ad3fde"
I1229 06:57:16.657452 17440 command_runner.go:130] ! I1229 06:53:51.269699 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:57:19.160135 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:57:24.162053 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:57:24.162161 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
I1229 06:57:24.182182 17440 command_runner.go:130] > b206d555ad19
I1229 06:57:24.183367 17440 logs.go:282] 1 containers: [b206d555ad19]
I1229 06:57:24.183464 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
I1229 06:57:24.206759 17440 command_runner.go:130] > 6b7711ee25a2
I1229 06:57:24.206821 17440 command_runner.go:130] > d81259f64136
I1229 06:57:24.206853 17440 logs.go:282] 2 containers: [6b7711ee25a2 d81259f64136]
I1229 06:57:24.206926 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
I1229 06:57:24.228856 17440 command_runner.go:130] > 6f69ba6a1553
I1229 06:57:24.228897 17440 logs.go:282] 1 containers: [6f69ba6a1553]
I1229 06:57:24.228968 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
I1229 06:57:24.247867 17440 command_runner.go:130] > 4d49952084c9
I1229 06:57:24.247890 17440 command_runner.go:130] > a79d99ad3fde
I1229 06:57:24.249034 17440 logs.go:282] 2 containers: [4d49952084c9 a79d99ad3fde]
I1229 06:57:24.249130 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
I1229 06:57:24.268209 17440 command_runner.go:130] > 8911777281f4
I1229 06:57:24.269160 17440 logs.go:282] 1 containers: [8911777281f4]
I1229 06:57:24.269243 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
I1229 06:57:24.288837 17440 command_runner.go:130] > f48fc04e3475
I1229 06:57:24.288871 17440 command_runner.go:130] > 17fe16a2822a
I1229 06:57:24.290245 17440 logs.go:282] 2 containers: [f48fc04e3475 17fe16a2822a]
I1229 06:57:24.290337 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
I1229 06:57:24.312502 17440 logs.go:282] 0 containers: []
W1229 06:57:24.312531 17440 logs.go:284] No container was found matching "kindnet"
I1229 06:57:24.312592 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
I1229 06:57:24.334811 17440 command_runner.go:130] > bd96b57aa9fc
I1229 06:57:24.334849 17440 logs.go:282] 1 containers: [bd96b57aa9fc]
I1229 06:57:24.334875 17440 logs.go:123] Gathering logs for kube-apiserver [b206d555ad19] ...
I1229 06:57:24.334888 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 b206d555ad19"
I1229 06:57:24.357541 17440 command_runner.go:130] ! I1229 06:57:22.434262 1 options.go:263] external host was not specified, using 192.168.39.121
I1229 06:57:24.357567 17440 command_runner.go:130] ! I1229 06:57:22.436951 1 server.go:150] Version: v1.35.0
I1229 06:57:24.357577 17440 command_runner.go:130] ! I1229 06:57:22.436991 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:57:24.357602 17440 command_runner.go:130] ! E1229 06:57:22.441956 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
W1229 06:57:24.359181 17440 logs.go:138] Found kube-apiserver [b206d555ad19] problem: E1229 06:57:22.441956 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
I1229 06:57:24.359206 17440 logs.go:123] Gathering logs for kube-controller-manager [f48fc04e3475] ...
I1229 06:57:24.359218 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f48fc04e3475"
I1229 06:57:24.381077 17440 command_runner.go:130] ! I1229 06:56:01.090404 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:57:24.381103 17440 command_runner.go:130] ! I1229 06:56:01.103535 1 controllermanager.go:189] "Starting" version="v1.35.0"
I1229 06:57:24.381113 17440 command_runner.go:130] ! I1229 06:56:01.103787 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:57:24.381121 17440 command_runner.go:130] ! I1229 06:56:01.105458 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1229 06:57:24.381131 17440 command_runner.go:130] ! I1229 06:56:01.105665 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:57:24.381137 17440 command_runner.go:130] ! I1229 06:56:01.105907 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1229 06:57:24.381144 17440 command_runner.go:130] ! I1229 06:56:01.105924 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:57:24.382680 17440 logs.go:123] Gathering logs for container status ...
I1229 06:57:24.382711 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1229 06:57:24.427354 17440 command_runner.go:130] > CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
I1229 06:57:24.427382 17440 command_runner.go:130] > b206d555ad194 5c6acd67e9cd1 2 seconds ago Exited kube-apiserver 5 d3819cc8ab802 kube-apiserver-functional-695625 kube-system
I1229 06:57:24.427400 17440 command_runner.go:130] > f48fc04e34751 2c9a4b058bd7e About a minute ago Running kube-controller-manager 2 0a96e34d38f8c kube-controller-manager-functional-695625 kube-system
I1229 06:57:24.427411 17440 command_runner.go:130] > 6b7711ee25a2d 0a108f7189562 About a minute ago Running etcd 2 173054afc2f39 etcd-functional-695625 kube-system
I1229 06:57:24.427421 17440 command_runner.go:130] > 4d49952084c92 550794e3b12ac 3 minutes ago Running kube-scheduler 2 fefef7c5591ea kube-scheduler-functional-695625 kube-system
I1229 06:57:24.427441 17440 command_runner.go:130] > 6f69ba6a1553a aa5e3ebc0dfed 3 minutes ago Exited coredns 1 a014f32abcd01 coredns-7d764666f9-wfq7m kube-system
I1229 06:57:24.427454 17440 command_runner.go:130] > d81259f64136c 0a108f7189562 3 minutes ago Exited etcd 1 1fc5fa7d92959 etcd-functional-695625 kube-system
I1229 06:57:24.427465 17440 command_runner.go:130] > 17fe16a2822a8 2c9a4b058bd7e 3 minutes ago Exited kube-controller-manager 1 98261fa185f6e kube-controller-manager-functional-695625 kube-system
I1229 06:57:24.427477 17440 command_runner.go:130] > a79d99ad3fde3 550794e3b12ac 3 minutes ago Exited kube-scheduler 1 b046056ff071b kube-scheduler-functional-695625 kube-system
I1229 06:57:24.427488 17440 command_runner.go:130] > bd96b57aa9fce 6e38f40d628db 4 minutes ago Exited storage-provisioner 0 64853b50a6c5e storage-provisioner kube-system
I1229 06:57:24.427509 17440 command_runner.go:130] > 8911777281f41 32652ff1bbe6b 4 minutes ago Exited kube-proxy 0 548561c7ada8f kube-proxy-g7lp9 kube-system
I1229 06:57:24.430056 17440 logs.go:123] Gathering logs for kubelet ...
I1229 06:57:24.430095 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1229 06:57:24.453665 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239338 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453712 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239383 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453738 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244411 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:57:24.453770 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244504 2634 container_log_manager.go:154] "Failed to rotate container logs" err="failed to list containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453809 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458139 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:57:24.453838 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458218 2634 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to list pod stats: failed to get pod or container map: failed to list all containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453867 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926377 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.453891 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926435 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453911 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926447 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453928 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.994121 2634 log.go:32] "Version from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453945 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927827 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.453961 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927867 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453974 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927930 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454002 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140553 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:57:24.454022 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140635 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454040 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140653 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454058 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140664 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454074 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930020 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454087 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930083 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454103 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930129 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454120 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932311 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454135 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932363 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454149 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932375 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454165 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140618 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:57:24.454179 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140679 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454194 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140697 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454208 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140709 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454224 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933321 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454246 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933382 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454262 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933393 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454276 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241324 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454294 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241391 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454310 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935649 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454326 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935930 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454342 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935948 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454358 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140389 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:57:24.454371 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140507 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454386 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140525 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454401 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140536 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454423 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937258 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454447 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937350 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454472 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937364 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454500 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939069 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454519 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939129 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454533 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939141 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454549 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139354 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:57:24.454565 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139413 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454579 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139428 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454593 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139440 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454608 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941237 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454625 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941285 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454640 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941296 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454655 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.109014 2634 kubelet.go:2691] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="6.97s"
I1229 06:57:24.454667 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.125762 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:57:24.454680 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.129855 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.454697 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.131487 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.454714 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.140438 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:57:24.454729 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.141703 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:57:24.454741 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.164789 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:57:24.454816 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.190793 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:57:24.454842 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202029 2634 kuberuntime_manager.go:1961] "PodSandboxStatus of sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd" pod="kube-system/etcd-functional-695625"
I1229 06:57:24.454855 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202077 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:57:24.454870 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202095 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:57:24.454881 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202348 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:57:24.454896 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202382 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:57:24.454912 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202394 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:57:24.454940 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202436 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-proxy-g7lp9" podUID="9c2c2ac1-7fa0-427d-b78e-ee14e169895a"
I1229 06:57:24.454957 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202695 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:57:24.454969 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202717 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"}
I1229 06:57:24.454987 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202737 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:57:24.455012 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202753 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/storage-provisioner" podUID="b5f201ca-6d54-4e15-9584-396fb1486f3c"
I1229 06:57:24.455025 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202781 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:57:24.455039 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202797 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:57:24.455055 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202829 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:57:24.455081 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203153 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.455097 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203201 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:57:24.455110 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203215 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"}
I1229 06:57:24.455125 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203229 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:57:24.455144 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203240 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:57:24.455165 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.205108 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer"
I1229 06:57:24.455186 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205291 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"ebc0dd45a3bf1e20d1e524935fd6129c\"}"
I1229 06:57:24.455204 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205358 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="ebc0dd45a3bf1e20d1e524935fd6129c"
I1229 06:57:24.455224 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205374 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:57:24.455243 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205391 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:57:24.455275 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205442 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"5079d003096e0cf8214852718da6832c\"}"
I1229 06:57:24.455294 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205472 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:57:24.455310 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205487 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.455326 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205502 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.455345 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.306369 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:57:24.455366 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.465709 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:57:24.455386 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.727775 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:57:24.455404 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.224724 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:57:24.455423 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.225054 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.455446 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.239349 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:57:24.455472 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.239613 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.455490 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.260924 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:57:24.455506 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.262706 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.455528 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.271403 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:57:24.455550 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.272071 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.455573 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.486082 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.455588 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.527267 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.455603 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.585714 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.455615 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.682419 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:57:24.455628 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 kubelet[2634]: E1229 06:53:51.994421 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.455640 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.009282 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.455657 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.028514 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.455669 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: I1229 06:53:52.059063 2634 scope.go:122] "RemoveContainer" containerID="4b032678478a0db80f17dd1d989d5d3ad03f5c19d261d887ee8bbc80c0ef716c"
I1229 06:57:24.455681 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.061268 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.455699 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.136206 2634 controller.go:251] "Failed to update lease" err="Put \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:57:24.455720 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.348866 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:57:24.455739 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.420977 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.455750 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.083455 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:57:24.455810 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099631 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:57:24.455823 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099665 2634 scope.go:122] "RemoveContainer" containerID="14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697"
I1229 06:57:24.455835 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.099823 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.455848 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.114949 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:57:24.455860 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115125 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.455872 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.115147 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:57:24.455892 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115570 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:57:24.455904 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128002 2634 scope.go:122] "RemoveContainer" containerID="abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32"
I1229 06:57:24.455916 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128620 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:57:24.455930 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.128846 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.455967 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128862 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.455990 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.129184 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.456008 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.146245 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:57:24.456019 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.149274 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:57:24.456031 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.158968 2634 scope.go:122] "RemoveContainer" containerID="bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159"
I1229 06:57:24.456052 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.483523 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:57:24.456067 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.165031 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.456078 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.165425 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:57:24.456100 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.166088 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:57:24.456114 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.177787 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:57:24.456124 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.177811 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:57:24.456144 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.178010 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:57:24.456159 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190233 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.456169 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.190259 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.456191 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190388 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.456205 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190596 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.456216 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.197650 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.456229 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198541 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:57:24.456239 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: I1229 06:53:55.198579 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:57:24.456260 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198854 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:57:24.456304 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.114313 2634 kubelet_node_status.go:474] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"containerRuntimeVersion\\\":\\\"docker://28.5.2\\\"}}}\" for node \"functional-695625\": Patch \"https://192.168.39.121:8441/api/v1/nodes/functional-695625/status?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:57:24.456318 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.650698 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.456331 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.650771 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.456352 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.651066 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.456364 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.808551 2634 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:57:24.456372 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
I1229 06:57:24.456379 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
I1229 06:57:24.456386 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
I1229 06:57:24.456396 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Consumed 2.468s CPU time, 33.6M memory peak.
I1229 06:57:24.456406 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 systemd[1]: Started kubelet: The Kubernetes Node Agent.
I1229 06:57:24.456423 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045324 6517 server.go:525] "Kubelet version" kubeletVersion="v1.35.0"
I1229 06:57:24.456441 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045661 6517 server.go:527] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:57:24.456458 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045707 6517 watchdog_linux.go:95] "Systemd watchdog is not enabled"
I1229 06:57:24.456472 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045732 6517 watchdog_linux.go:138] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started."
I1229 06:57:24.456487 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.046147 6517 server.go:951] "Client rotation is on, will bootstrap in background"
I1229 06:57:24.456503 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.047668 6517 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem"
I1229 06:57:24.456520 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.050807 6517 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:57:24.456540 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.062385 6517 server.go:1418] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
I1229 06:57:24.456560 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066238 6517 server.go:775] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /"
I1229 06:57:24.456573 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066279 6517 server.go:836] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false
I1229 06:57:24.456584 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066453 6517 container_manager_linux.go:272] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
I1229 06:57:24.456626 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066538 6517 container_manager_linux.go:277] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"functional-695625","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
I1229 06:57:24.456639 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066683 6517 topology_manager.go:143] "Creating topology manager with none policy"
I1229 06:57:24.456647 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066691 6517 container_manager_linux.go:308] "Creating device plugin manager"
I1229 06:57:24.456657 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066715 6517 container_manager_linux.go:317] "Creating Dynamic Resource Allocation (DRA) manager"
I1229 06:57:24.456665 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066977 6517 state_mem.go:41] "Initialized" logger="CPUManager state memory"
I1229 06:57:24.456676 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067273 6517 kubelet.go:482] "Attempting to sync node with API server"
I1229 06:57:24.456685 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067303 6517 kubelet.go:383] "Adding static pod path" path="/etc/kubernetes/manifests"
I1229 06:57:24.456695 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067321 6517 kubelet.go:394] "Adding apiserver pod source"
I1229 06:57:24.456703 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067339 6517 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
I1229 06:57:24.456714 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.071645 6517 kuberuntime_manager.go:294] "Container runtime initialized" containerRuntime="docker" version="28.5.2" apiVersion="v1"
I1229 06:57:24.456726 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072532 6517 kubelet.go:943] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled"
I1229 06:57:24.456739 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072614 6517 kubelet.go:970] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled"
I1229 06:57:24.456748 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.079617 6517 server.go:1257] "Started kubelet"
I1229 06:57:24.456761 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.087576 6517 fs_resource_analyzer.go:69] "Starting FS ResourceAnalyzer"
I1229 06:57:24.456771 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.098777 6517 scope.go:122] "RemoveContainer" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.456782 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.106373 6517 server.go:182] "Starting to listen" address="0.0.0.0" port=10250
I1229 06:57:24.456790 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.109848 6517 server.go:317] "Adding debug handlers to kubelet server"
I1229 06:57:24.456811 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117444 6517 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
I1229 06:57:24.456821 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117555 6517 server_v1.go:49] "podresources" method="list" useActivePods=true
I1229 06:57:24.456832 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117716 6517 server.go:254] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
I1229 06:57:24.456845 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.118699 6517 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key"
I1229 06:57:24.456853 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119167 6517 volume_manager.go:311] "Starting Kubelet Volume Manager"
I1229 06:57:24.456866 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.119433 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:57:24.456875 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119972 6517 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
I1229 06:57:24.456885 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.120370 6517 reconciler.go:29] "Reconciler: start to sync state"
I1229 06:57:24.456893 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.130418 6517 factory.go:223] Registration of the systemd container factory successfully
I1229 06:57:24.456907 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.131188 6517 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory
I1229 06:57:24.456918 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.141029 6517 factory.go:223] Registration of the containerd container factory successfully
I1229 06:57:24.456927 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183036 6517 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4"
I1229 06:57:24.456937 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183220 6517 status_manager.go:249] "Starting to sync pod status with apiserver"
I1229 06:57:24.456947 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183330 6517 kubelet.go:2501] "Starting kubelet main sync loop"
I1229 06:57:24.456959 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.183444 6517 kubelet.go:2525] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
I1229 06:57:24.456971 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.221428 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:57:24.456990 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.242700 6517 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.457011 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.243294 6517 image_gc_manager.go:222] "Failed to monitor images" err="get container status: runtime container status: rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.457023 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269160 6517 cpu_manager.go:225] "Starting" policy="none"
I1229 06:57:24.457032 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269189 6517 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
I1229 06:57:24.457044 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269211 6517 state_mem.go:41] "Initialized" logger="CPUManager state checkpoint.CPUManager state memory"
I1229 06:57:24.457054 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269368 6517 state_mem.go:94] "Updated default CPUSet" logger="CPUManager state checkpoint.CPUManager state memory" cpuSet=""
I1229 06:57:24.457067 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269407 6517 state_mem.go:102] "Updated CPUSet assignments" logger="CPUManager state checkpoint.CPUManager state memory" assignments={}
I1229 06:57:24.457074 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269429 6517 policy_none.go:50] "Start"
I1229 06:57:24.457083 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269440 6517 memory_manager.go:187] "Starting memorymanager" policy="None"
I1229 06:57:24.457093 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269450 6517 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint"
I1229 06:57:24.457105 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269563 6517 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint"
I1229 06:57:24.457112 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.270193 6517 policy_none.go:44] "Start"
I1229 06:57:24.457125 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.280697 6517 manager.go:525] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
I1229 06:57:24.457133 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282209 6517 eviction_manager.go:194] "Eviction manager: starting control loop"
I1229 06:57:24.457145 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282399 6517 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
I1229 06:57:24.457154 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282694 6517 plugin_manager.go:121] "Starting Kubelet Plugin Manager"
I1229 06:57:24.457168 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.285700 6517 eviction_manager.go:272] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime"
I1229 06:57:24.457178 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.286000 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457192 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.290189 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.457205 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.296210 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.457220 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296213 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:57:24.457235 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296423 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:57:24.457247 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296509 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:57:24.457258 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296522 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:57:24.457271 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296659 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:57:24.457284 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296736 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:57:24.457299 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.298291 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.457310 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.300783 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:57:24.457322 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.307864 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:57:24.457333 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327004 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:57:24.457345 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327039 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:57:24.457359 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.337430 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:57:24.457370 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338584 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:57:24.457381 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338603 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:57:24.457396 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.339318 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.457410 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.384315 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.457436 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.121079 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="200ms"
I1229 06:57:24.457460 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.286789 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457481 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.288099 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457500 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.322920 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="400ms"
I1229 06:57:24.457515 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.289381 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457533 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.724518 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="800ms"
I1229 06:57:24.457586 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.088119 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bc22bb49a default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,LastTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.457604 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.387607 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:57:24.457613 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: I1229 06:54:34.589687 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.457633 17440 command_runner.go:130] > Dec 29 06:54:40 functional-695625 kubelet[6517]: E1229 06:54:40.289653 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457649 17440 command_runner.go:130] > Dec 29 06:54:41 functional-695625 kubelet[6517]: E1229 06:54:41.525961 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="1.6s"
I1229 06:57:24.457664 17440 command_runner.go:130] > Dec 29 06:54:50 functional-695625 kubelet[6517]: E1229 06:54:50.290623 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457680 17440 command_runner.go:130] > Dec 29 06:54:53 functional-695625 kubelet[6517]: E1229 06:54:53.127043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="3.2s"
I1229 06:57:24.457697 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.123055 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:57:24.457717 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223407 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-ca-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:57:24.457740 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223452 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-usr-share-ca-certificates\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:57:24.457763 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224254 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-flexvolume-dir\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.457785 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224286 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-k8s-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.457817 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224307 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/ebc0dd45a3bf1e20d1e524935fd6129c-kubeconfig\") pod \"kube-scheduler-functional-695625\" (UID: \"ebc0dd45a3bf1e20d1e524935fd6129c\") " pod="kube-system/kube-scheduler-functional-695625"
I1229 06:57:24.457904 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224328 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-k8s-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:57:24.457927 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224346 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-ca-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.457948 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224360 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-kubeconfig\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.457976 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224377 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-usr-share-ca-certificates\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.457996 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224432 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-certs\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:57:24.458019 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224449 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-data\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:57:24.458034 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.291332 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458050 17440 command_runner.go:130] > Dec 29 06:55:06 functional-695625 kubelet[6517]: E1229 06:55:06.329330 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="6.4s"
I1229 06:57:24.458090 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.090561 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.458106 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.592540 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:57:24.458116 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: I1229 06:55:08.994308 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.458130 17440 command_runner.go:130] > Dec 29 06:55:10 functional-695625 kubelet[6517]: E1229 06:55:10.291711 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458141 17440 command_runner.go:130] > Dec 29 06:55:20 functional-695625 kubelet[6517]: E1229 06:55:20.292793 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458158 17440 command_runner.go:130] > Dec 29 06:55:22 functional-695625 kubelet[6517]: E1229 06:55:22.729733 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:57:24.458170 17440 command_runner.go:130] > Dec 29 06:55:30 functional-695625 kubelet[6517]: E1229 06:55:30.293859 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458184 17440 command_runner.go:130] > Dec 29 06:55:39 functional-695625 kubelet[6517]: E1229 06:55:39.730496 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:57:24.458198 17440 command_runner.go:130] > Dec 29 06:55:40 functional-695625 kubelet[6517]: E1229 06:55:40.294978 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458263 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.093022 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc794297 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node functional-695625 status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,LastTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.458295 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.996721 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:57:24.458316 17440 command_runner.go:130] > Dec 29 06:55:43 functional-695625 kubelet[6517]: I1229 06:55:43.798535 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.458339 17440 command_runner.go:130] > Dec 29 06:55:50 functional-695625 kubelet[6517]: E1229 06:55:50.295990 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458367 17440 command_runner.go:130] > Dec 29 06:55:56 functional-695625 kubelet[6517]: E1229 06:55:56.732252 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:57:24.458389 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.228455 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:57:24.458409 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.296294 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458429 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.339811 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.458447 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.590728 6517 scope.go:122] "RemoveContainer" containerID="d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974"
I1229 06:57:24.458468 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.596576 6517 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:57:24.458490 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.196928 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.458512 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199564 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458529 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199638 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.458542 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.199656 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.458572 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199813 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.458587 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.211732 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458602 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.212086 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.458617 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226269 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458632 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226760 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.458644 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226846 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458659 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226932 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.458674 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240397 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458686 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.458702 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240759 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458717 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240798 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.458732 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241099 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458746 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241133 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.458762 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241440 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458777 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241482 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.458790 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: I1229 06:56:02.241498 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.458824 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241585 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.458839 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246390 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458852 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246454 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.458865 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246667 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458879 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246717 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.458889 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: I1229 06:56:03.246732 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.458911 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246832 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.458925 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.297136 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458939 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342375 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458952 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.458964 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: I1229 06:56:10.342477 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.458983 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342670 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.458998 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593708 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459016 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593770 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.459031 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598591 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459048 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598652 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.459062 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606502 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459076 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606600 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.459090 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302101 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459104 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302675 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.459118 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302176 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459132 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302763 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459145 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: I1229 06:56:11.302780 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.459158 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302307 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459174 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302816 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.459186 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.321043 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.459201 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.321965 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459215 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322030 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459225 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.322044 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459247 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322163 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459261 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323008 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459274 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323148 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.459286 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336021 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459302 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336097 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459314 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: I1229 06:56:13.336114 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459334 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336243 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459352 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.733654 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:57:24.459392 17440 command_runner.go:130] > Dec 29 06:56:16 functional-695625 kubelet[6517]: E1229 06:56:16.095560 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc7b9ae7 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node functional-695625 status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,LastTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.459418 17440 command_runner.go:130] > Dec 29 06:56:17 functional-695625 kubelet[6517]: E1229 06:56:17.801052 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:57:24.459438 17440 command_runner.go:130] > Dec 29 06:56:19 functional-695625 kubelet[6517]: I1229 06:56:19.403026 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.459461 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.297746 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.459483 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342467 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459502 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342554 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459515 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: I1229 06:56:20.342589 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459537 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342829 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459552 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.385984 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459567 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.386062 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459579 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: I1229 06:56:20.386078 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459599 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.386220 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459613 17440 command_runner.go:130] > Dec 29 06:56:30 functional-695625 kubelet[6517]: E1229 06:56:30.298955 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.459634 17440 command_runner.go:130] > Dec 29 06:56:30 functional-695625 kubelet[6517]: E1229 06:56:30.734998 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:57:24.459649 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.185639 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459662 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.185732 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459676 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: I1229 06:56:32.185750 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459693 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493651 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459707 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493733 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459720 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: I1229 06:56:32.493755 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.459741 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493996 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459753 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: I1229 06:56:33.510294 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459769 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511464 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459782 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511520 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459806 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: I1229 06:56:33.511535 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.459829 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511684 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459845 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525404 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459859 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525467 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459875 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: I1229 06:56:34.525482 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.459897 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525663 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459911 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.300040 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.459924 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342011 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459938 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342082 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459950 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: I1229 06:56:40.342099 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.459972 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342223 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459987 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.567456 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.460000 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.567665 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.460016 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: I1229 06:56:40.567686 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.460036 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.568152 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.460053 17440 command_runner.go:130] > Dec 29 06:56:47 functional-695625 kubelet[6517]: E1229 06:56:47.736964 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:57:24.460094 17440 command_runner.go:130] > Dec 29 06:56:50 functional-695625 kubelet[6517]: E1229 06:56:50.098168 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.27202431 +0000 UTC m=+0.287773690,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.460108 17440 command_runner.go:130] > Dec 29 06:56:50 functional-695625 kubelet[6517]: E1229 06:56:50.300747 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.460124 17440 command_runner.go:130] > Dec 29 06:56:53 functional-695625 kubelet[6517]: E1229 06:56:53.405155 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:57:24.460136 17440 command_runner.go:130] > Dec 29 06:56:56 functional-695625 kubelet[6517]: I1229 06:56:56.606176 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.460148 17440 command_runner.go:130] > Dec 29 06:57:00 functional-695625 kubelet[6517]: E1229 06:57:00.301915 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.460162 17440 command_runner.go:130] > Dec 29 06:57:00 functional-695625 kubelet[6517]: E1229 06:57:00.330173 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:57:24.460182 17440 command_runner.go:130] > Dec 29 06:57:04 functional-695625 kubelet[6517]: E1229 06:57:04.738681 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:57:24.460195 17440 command_runner.go:130] > Dec 29 06:57:10 functional-695625 kubelet[6517]: E1229 06:57:10.302083 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.460206 17440 command_runner.go:130] > Dec 29 06:57:20 functional-695625 kubelet[6517]: E1229 06:57:20.302612 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.460221 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.185645 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.460236 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.185704 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.460254 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.740062 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:57:24.460269 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.185952 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.460283 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.186017 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.460296 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.186034 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.460308 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.873051 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.460321 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874264 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.460334 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874357 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.460347 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.874375 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:57:24.460367 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874499 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.460381 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892021 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.460395 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892083 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.460414 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: I1229 06:57:23.892098 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:57:24.460450 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892218 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.460499 17440 command_runner.go:130] > Dec 29 06:57:24 functional-695625 kubelet[6517]: E1229 06:57:24.100978 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc794297 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node functional-695625 status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,LastTimestamp:2025-12-29 06:54:00.27223373 +0000 UTC m=+0.287983111,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.513870 17440 logs.go:123] Gathering logs for etcd [d81259f64136] ...
I1229 06:57:24.513913 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 d81259f64136"
I1229 06:57:24.542868 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517725Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:57:24.542904 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517828Z","caller":"etcdmain/config.go:270","msg":"--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7."}
I1229 06:57:24.542974 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.517848Z","caller":"etcdmain/etcd.go:64","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.39.121:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--feature-gates=InitialCorruptCheck=true","--initial-advertise-peer-urls=https://192.168.39.121:2380","--initial-cluster=functional-695625=https://192.168.39.121:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.39.121:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.39.121:2380","--name=functional-695625","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--snapshot-count=10000","--trusted-ca-file=/var/lib
/minikube/certs/etcd/ca.crt","--watch-progress-notify-interval=5s"]}
I1229 06:57:24.542992 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519323Z","caller":"etcdmain/etcd.go:107","msg":"server has already been initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
I1229 06:57:24.543020 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.519372Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:57:24.543037 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519700Z","caller":"embed/etcd.go:138","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.39.121:2380"]}
I1229 06:57:24.543067 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.522332Z","caller":"embed/etcd.go:544","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:57:24.543085 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.530852Z","caller":"embed/etcd.go:146","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"]}
I1229 06:57:24.543199 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.531312Z","caller":"embed/etcd.go:323","msg":"starting an etcd server","etcd-version":"3.6.6","git-sha":"d2809cf","go-version":"go1.24.10","go-os":"linux","go-arch":"amd64","max-cpu-set":2,"max-cpu-available":2,"member-initialized":true,"name":"functional-695625","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"experimental-local-address":"","cors":["
*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"feature-gates":"InitialCorruptCheck=true","initial-corrupt-check":false,"corrupt-check-time-interval":"0s","compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","discovery-token":"","discovery-endpoints":"","discovery-dial-timeout":"2s","discovery-request-timeout":"5s","discovery-keepalive-time":"2s","discovery-keepalive-timeout":"6s","discovery-insecure-transport":true,"discovery-insecure-skip-tls-verify":false,"discovery-cert":"","discovery-key":"","discovery-cacert":"","discovery-user":"","downgrade-check-interval":"5s","max-learners":1,"v2-deprecation":"write-only"}
I1229 06:57:24.543237 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.533505Z","logger":"bbolt","caller":"backend/backend.go:203","msg":"Opening db file (/var/lib/minikube/etcd/member/snap/db) with mode -rw------- and with options: {Timeout: 0s, NoGrowSync: false, NoFreelistSync: true, PreLoadFreelist: false, FreelistType: hashmap, ReadOnly: false, MmapFlags: 8000, InitialMmapSize: 10737418240, PageSize: 0, NoSync: false, OpenFile: 0x0, Mlock: false, Logger: 0xc00006a930}"}
I1229 06:57:24.543258 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.562961Z","logger":"bbolt","caller":"bbolt@v1.4.3/db.go:321","msg":"Opening bbolt db (/var/lib/minikube/etcd/member/snap/db) successfully"}
I1229 06:57:24.543276 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.566967Z","caller":"storage/backend.go:80","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"33.344174ms"}
I1229 06:57:24.543291 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.569353Z","caller":"etcdserver/bootstrap.go:220","msg":"restore consistentIndex","index":497}
I1229 06:57:24.543306 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596637Z","caller":"etcdserver/bootstrap.go:441","msg":"No snapshot found. Recovering WAL from scratch!"}
I1229 06:57:24.543327 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596694Z","caller":"etcdserver/bootstrap.go:232","msg":"recovered v3 backend","backend-size-bytes":1019904,"backend-size":"1.0 MB","backend-size-in-use-bytes":999424,"backend-size-in-use":"999 kB"}
I1229 06:57:24.543344 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596795Z","caller":"etcdserver/bootstrap.go:90","msg":"Bootstrapping WAL from snapshot"}
I1229 06:57:24.543365 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.620855Z","caller":"etcdserver/bootstrap.go:599","msg":"restarting local member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","commit-index":497}
I1229 06:57:24.543380 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.621587Z","caller":"etcdserver/bootstrap.go:94","msg":"bootstrapping cluster"}
I1229 06:57:24.543393 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624518Z","caller":"etcdserver/bootstrap.go:101","msg":"bootstrapping storage"}
I1229 06:57:24.543419 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624664Z","caller":"membership/cluster.go:605","msg":"Detected member only in v3store but missing in v2store","member":"{ID:cbdf275f553df7c2 RaftAttributes:{PeerURLs:[https://192.168.39.121:2380] IsLearner:false} Attributes:{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}}"}
I1229 06:57:24.543437 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624700Z","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.6"}
I1229 06:57:24.543464 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624712Z","caller":"membership/cluster.go:297","msg":"recovered/added member from store","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","recovered-remote-peer-id":"cbdf275f553df7c2","recovered-remote-peer-urls":["https://192.168.39.121:2380"],"recovered-remote-peer-is-learner":false}
I1229 06:57:24.543483 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624720Z","caller":"membership/cluster.go:307","msg":"set cluster version from store","cluster-version":"3.6"}
I1229 06:57:24.543499 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624728Z","caller":"etcdserver/bootstrap.go:109","msg":"bootstrapping raft"}
I1229 06:57:24.543511 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624764Z","caller":"etcdserver/server.go:312","msg":"bootstrap successfully"}
I1229 06:57:24.543561 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624799Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=()"}
I1229 06:57:24.543585 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624856Z","logger":"raft","caller":"v3@v3.6.0/raft.go:897","msg":"cbdf275f553df7c2 became follower at term 2"}
I1229 06:57:24.543605 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624867Z","logger":"raft","caller":"v3@v3.6.0/raft.go:493","msg":"newRaft cbdf275f553df7c2 [peers: [], term: 2, commit: 497, applied: 0, lastindex: 497, lastterm: 2]"}
I1229 06:57:24.543623 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.634002Z","caller":"auth/store.go:1135","msg":"simple token is not cryptographically signed"}
I1229 06:57:24.543659 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.644772Z","caller":"mvcc/kvstore.go:408","msg":"kvstore restored","current-rev":465}
I1229 06:57:24.543680 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.681530Z","caller":"storage/quota.go:93","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
I1229 06:57:24.543701 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686046Z","caller":"etcdserver/corrupt.go:91","msg":"starting initial corruption check","local-member-id":"cbdf275f553df7c2","timeout":"7s"}
I1229 06:57:24.543722 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686350Z","caller":"etcdserver/corrupt.go:172","msg":"initial corruption checking passed; no corruption","local-member-id":"cbdf275f553df7c2"}
I1229 06:57:24.543744 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686391Z","caller":"etcdserver/server.go:589","msg":"starting etcd server","local-member-id":"cbdf275f553df7c2","local-server-version":"3.6.6","cluster-id":"6f38b6947d3f1f22","cluster-version":"3.6"}
I1229 06:57:24.543770 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687141Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:57:24.543821 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687399Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cbdf275f553df7c2","initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
I1229 06:57:24.543840 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687425Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
I1229 06:57:24.543865 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687475Z","caller":"etcdserver/server.go:483","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"cbdf275f553df7c2","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
I1229 06:57:24.543886 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687536Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
I1229 06:57:24.543908 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687564Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
I1229 06:57:24.543927 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687571Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
I1229 06:57:24.543945 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687702Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:57:24.543962 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687713Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.121:2380"}
I1229 06:57:24.543980 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692750Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=(14690503799911348162)"}
I1229 06:57:24.544010 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692847Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","added-peer-id":"cbdf275f553df7c2","added-peer-peer-urls":["https://192.168.39.121:2380"],"added-peer-is-learner":false}
I1229 06:57:24.544031 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.694703Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","from":"3.6","to":"3.6"}
I1229 06:57:24.544065 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830725Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cbdf275f553df7c2 is starting a new election at term 2"}
I1229 06:57:24.544084 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830765Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cbdf275f553df7c2 became pre-candidate at term 2"}
I1229 06:57:24.544103 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830844Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgPreVoteResp from cbdf275f553df7c2 at term 2"}
I1229 06:57:24.544120 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830858Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
I1229 06:57:24.544136 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830936Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cbdf275f553df7c2 became candidate at term 3"}
I1229 06:57:24.544157 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832101Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgVoteResp from cbdf275f553df7c2 at term 3"}
I1229 06:57:24.544176 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832130Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgVoteResp votes and 0 vote rejections"}
I1229 06:57:24.544193 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832148Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cbdf275f553df7c2 became leader at term 3"}
I1229 06:57:24.544213 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832166Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cbdf275f553df7c2 elected leader cbdf275f553df7c2 at term 3"}
I1229 06:57:24.544224 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835446Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:57:24.544248 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835384Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cbdf275f553df7c2","local-member-attributes":"{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}","cluster-id":"6f38b6947d3f1f22","publish-timeout":"7s"}
I1229 06:57:24.544264 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839145Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:57:24.544283 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839733Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:57:24.544298 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851017Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
I1229 06:57:24.544314 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851067Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
I1229 06:57:24.544331 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851748Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:57:24.544345 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.856729Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
I1229 06:57:24.544364 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.869216Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.121:2379"}
I1229 06:57:24.544381 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706108Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
I1229 06:57:24.544405 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706269Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:57:24.544430 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:51.706381Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:57:24.544465 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.707655Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:57:24.544517 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.709799Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:57:24.544537 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.709913Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"cbdf275f553df7c2","current-leader-member-id":"cbdf275f553df7c2"}
I1229 06:57:24.544554 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710255Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
I1229 06:57:24.544575 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710690Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:57:24.544595 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710782Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:57:24.544623 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.710832Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:57:24.544641 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710742Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
I1229 06:57:24.544662 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711002Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:57:24.544683 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711035Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:57:24.544711 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.711045Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:57:24.544730 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717452Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:57:24.544767 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.717551Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:57:24.544807 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717601Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:57:24.544828 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717654Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:57:24.552509 17440 logs.go:123] Gathering logs for coredns [6f69ba6a1553] ...
I1229 06:57:24.552540 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 6f69ba6a1553"
I1229 06:57:24.575005 17440 logs.go:123] Gathering logs for Docker ...
I1229 06:57:24.575036 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I1229 06:57:24.597505 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:57:24.597545 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:57:24.597560 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:57:24.597577 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:57:24.597596 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=fatal msg="failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.597610 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Main process exited, code=exited, status=1/FAILURE
I1229 06:57:24.597628 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Failed with result 'exit-code'.
I1229 06:57:24.597642 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Failed to start CRI Interface for Docker Application Container Engine.
I1229 06:57:24.597654 17440 command_runner.go:130] > Dec 29 06:52:22 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:57:24.597667 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.020462163Z" level=info msg="Starting up"
I1229 06:57:24.597682 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.027928346Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:57:24.597705 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028129610Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:57:24.597733 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028144703Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:57:24.597753 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.043277940Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:57:24.597765 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.068992169Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:57:24.597773 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.093451498Z" level=info msg="Loading containers: start."
I1229 06:57:24.597803 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.245820420Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:57:24.597814 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.354124488Z" level=info msg="Loading containers: done."
I1229 06:57:24.597825 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.369556904Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:57:24.597834 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.370022229Z" level=info msg="Initializing buildkit"
I1229 06:57:24.597841 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: cri-docker.service: Scheduled restart job, restart counter is at 1.
I1229 06:57:24.597848 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:57:24.597856 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.429481151Z" level=info msg="Completed buildkit initialization"
I1229 06:57:24.597866 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437135480Z" level=info msg="Daemon has completed initialization"
I1229 06:57:24.597874 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437228150Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:57:24.597883 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437499736Z" level=info msg="API listen on /run/docker.sock"
I1229 06:57:24.597900 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437545942Z" level=info msg="API listen on [::]:2376"
I1229 06:57:24.597909 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:57:24.597916 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:57:24.597925 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:57:24.597936 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:57:24.597944 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:57:24.597953 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Hairpin mode is set to none"
I1229 06:57:24.597960 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Loaded network plugin cni"
I1229 06:57:24.597973 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:57:24.597981 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:57:24.597991 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:57:24.597999 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:57:24.598010 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:57:24.598017 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:57:24.598029 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250034276Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:57:24.598041 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250065025Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:57:24.598054 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250432086Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:57:24.598067 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250448972Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:57:24.598074 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:57:24.598084 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.004793725Z" level=info msg="Processing signal 'terminated'"
I1229 06:57:24.598095 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006609373Z" level=warning msg="Error while testing if containerd API is ready" error="Canceled: grpc: the client connection is closing"
I1229 06:57:24.598104 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006865498Z" level=info msg="Daemon shutdown complete"
I1229 06:57:24.598111 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:57:24.598117 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:57:24.598126 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:57:24.598132 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.072059214Z" level=info msg="Starting up"
I1229 06:57:24.598141 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079212056Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:57:24.598154 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079317481Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:57:24.598174 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079333267Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:57:24.598186 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.100712562Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:57:24.598196 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.111060819Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:57:24.598205 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.125644752Z" level=info msg="Loading containers: start."
I1229 06:57:24.598224 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.269806698Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:57:24.598235 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.403684326Z" level=info msg="Loading containers: done."
I1229 06:57:24.598246 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419740189Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:57:24.598256 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419840379Z" level=info msg="Initializing buildkit"
I1229 06:57:24.598264 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.440865810Z" level=info msg="Completed buildkit initialization"
I1229 06:57:24.598273 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.450796825Z" level=info msg="Daemon has completed initialization"
I1229 06:57:24.598281 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451233366Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:57:24.598289 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451367379Z" level=info msg="API listen on /run/docker.sock"
I1229 06:57:24.598297 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451393479Z" level=info msg="API listen on [::]:2376"
I1229 06:57:24.598306 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:57:24.598314 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:57:24.598320 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:57:24.598327 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:57:24.598334 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:57:24.598345 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:57:24.598354 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:57:24.598365 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:57:24.598373 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:57:24.598381 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Loaded network plugin cni"
I1229 06:57:24.598389 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:57:24.598400 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:57:24.598415 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:57:24.598431 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:57:24.598447 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:57:24.598463 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:57:24.598476 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:57:24.598492 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.014018901Z" level=info msg="Processing signal 'terminated'"
I1229 06:57:24.598503 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.015980570Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
I1229 06:57:24.598513 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.016658114Z" level=info msg="Daemon shutdown complete"
I1229 06:57:24.598522 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:57:24.598531 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:57:24.598538 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:57:24.598545 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.080172805Z" level=info msg="Starting up"
I1229 06:57:24.598555 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087153730Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:57:24.598578 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087606870Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:57:24.598591 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087791007Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:57:24.598602 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.102104328Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:57:24.598613 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.438808405Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:57:24.598621 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.712758412Z" level=info msg="Loading containers: start."
I1229 06:57:24.598642 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.850108278Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:57:24.598653 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.981771558Z" level=info msg="Loading containers: done."
I1229 06:57:24.598664 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997281457Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:57:24.598674 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997336373Z" level=info msg="Initializing buildkit"
I1229 06:57:24.598683 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.018270012Z" level=info msg="Completed buildkit initialization"
I1229 06:57:24.598693 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.027948102Z" level=info msg="Daemon has completed initialization"
I1229 06:57:24.598701 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028167710Z" level=info msg="API listen on /run/docker.sock"
I1229 06:57:24.598716 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028236879Z" level=info msg="API listen on [::]:2376"
I1229 06:57:24.598724 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028260561Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:57:24.598732 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:57:24.598760 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598774 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598787 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598815 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598832 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:44Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
I1229 06:57:24.598845 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:46Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598860 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598873 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598889 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598904 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.687270343Z" level=info msg="ignoring event" container=67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.598918 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.834054505Z" level=info msg="ignoring event" container=82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.598933 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154228197Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:57:24.598946 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154272599Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:57:24.598958 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154382560Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:57:24.598973 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154394909Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:57:24.598980 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:57:24.598989 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 dockerd[1647]: time="2025-12-29T06:53:25.157393741Z" level=info msg="Processing signal 'terminated'"
I1229 06:57:24.598999 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.159560262Z" level=error msg="Force shutdown daemon"
I1229 06:57:24.599008 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.160035445Z" level=info msg="Daemon shutdown complete"
I1229 06:57:24.599015 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:57:24.599022 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:57:24.599030 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Consumed 2.138s CPU time, 29.7M memory peak.
I1229 06:57:24.599036 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:57:24.599043 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.286623538Z" level=info msg="Starting up"
I1229 06:57:24.599054 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295291170Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:57:24.599065 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295480841Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:57:24.599077 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295496671Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:57:24.599088 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.316635284Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:57:24.599099 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.328807793Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:57:24.599107 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.355375449Z" level=info msg="Loading containers: start."
I1229 06:57:24.599120 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.681285713Z" level=info msg="ignoring event" container=5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599138 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.755492465Z" level=info msg="ignoring event" container=bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599151 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.762530714Z" level=info msg="ignoring event" container=64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599168 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.775670003Z" level=info msg="ignoring event" container=0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599185 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.794654459Z" level=info msg="ignoring event" container=8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599198 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.801655844Z" level=info msg="ignoring event" container=548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599213 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828715029Z" level=info msg="ignoring event" container=ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599228 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828769092Z" level=info msg="ignoring event" container=a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599241 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.845767487Z" level=info msg="ignoring event" container=abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599257 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.864343925Z" level=info msg="ignoring event" container=fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599270 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.865774071Z" level=info msg="ignoring event" container=14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599285 17440 command_runner.go:130] > Dec 29 06:53:45 functional-695625 dockerd[4014]: time="2025-12-29T06:53:45.656598076Z" level=info msg="ignoring event" container=bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599297 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.636734672Z" level=info msg="ignoring event" container=fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599319 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.811417108Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:57:24.599331 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.854503584Z" level=info msg="Removing stale sandbox" cid=a123d63a8edb isRestore=false sid=bee98e10184c
I1229 06:57:24.599346 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.857444846Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 155f23c2cd353f99747cbbed5071c374427d34acfe358ab2da9489f0ecc6dd58 20989221f5da3e18159e9875a44d6ffa354887adacc49a282cdee70b58f0dd06], retrying...."
I1229 06:57:24.599359 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.873316567Z" level=info msg="Removing stale sandbox" cid=0af491ef7c2f isRestore=false sid=043bbf7592a3
I1229 06:57:24.599376 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.875334227Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 abd499ef79857402bb9465b07e26fb0f75693045ea6a45283c4a1a4b13da7c92], retrying...."
I1229 06:57:24.599387 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.887452986Z" level=info msg="Removing stale sandbox" cid=ad82b94f7629 isRestore=false sid=4ae81a2c92d8
I1229 06:57:24.599405 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.890633879Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 ccabc3ec6c0d337909f3a6bfccd1999d5ddec500f785c46c7c1173bb9f142a4d], retrying...."
I1229 06:57:24.599423 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.896180450Z" level=info msg="Removing stale sandbox" cid=5024b03252e3 isRestore=false sid=4f7be10df8fc
I1229 06:57:24.599452 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.898438145Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 0e272d35a10e432b889f2a3f6f048225031acf42b0444ba6b0cc9339f3cb374f], retrying...."
I1229 06:57:24.599472 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.903187461Z" level=info msg="Removing stale sandbox" cid=64853b50a6c5 isRestore=false sid=826a3dc204ef
I1229 06:57:24.599489 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.905271147Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 67cd3d4378e987242bd34247eace592097542682b6c3f23a5a478422e9bfbb3b], retrying...."
I1229 06:57:24.599503 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.910152629Z" level=info msg="Removing stale sandbox" cid=548561c7ada8 isRestore=false sid=94281ce70a77
I1229 06:57:24.599517 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.911967707Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 b513626d489ab85e12802c06e57f2ac0b0298434467c73d2846152ca9481eeae], retrying...."
I1229 06:57:24.599529 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.917235829Z" level=info msg="Removing stale sandbox" cid=fe7b5da2f7fb isRestore=false sid=b5e6c523a381
I1229 06:57:24.599544 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.919265802Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 bef3c0f56e910ab0a1a698f2eb08c97229abee2b90bf53ab9119cbdba3cb6eaa], retrying...."
I1229 06:57:24.599559 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022090385Z" level=warning msg="error locating sandbox id 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3: sandbox 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3 not found"
I1229 06:57:24.599572 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022147638Z" level=warning msg="error locating sandbox id 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48: sandbox 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48 not found"
I1229 06:57:24.599587 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022162233Z" level=warning msg="error locating sandbox id 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6: sandbox 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6 not found"
I1229 06:57:24.599602 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022177741Z" level=warning msg="error locating sandbox id 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e: sandbox 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e not found"
I1229 06:57:24.599615 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022193375Z" level=warning msg="error locating sandbox id bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc: sandbox bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc not found"
I1229 06:57:24.599631 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022209936Z" level=warning msg="error locating sandbox id 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2: sandbox 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2 not found"
I1229 06:57:24.599644 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022222477Z" level=warning msg="error locating sandbox id b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0: sandbox b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0 not found"
I1229 06:57:24.599654 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022440032Z" level=info msg="Loading containers: done."
I1229 06:57:24.599664 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037242165Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:57:24.599673 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037335060Z" level=info msg="Initializing buildkit"
I1229 06:57:24.599682 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.057350643Z" level=info msg="Completed buildkit initialization"
I1229 06:57:24.599692 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.066932687Z" level=info msg="Daemon has completed initialization"
I1229 06:57:24.599700 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067089967Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:57:24.599710 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067190842Z" level=info msg="API listen on /run/docker.sock"
I1229 06:57:24.599747 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067284257Z" level=info msg="API listen on [::]:2376"
I1229 06:57:24.599756 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:57:24.599772 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:53:48Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780\""
I1229 06:57:24.599782 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:57:24.599789 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:57:24.599806 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:57:24.599814 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Consumed 1.284s CPU time, 18.5M memory peak.
I1229 06:57:24.599822 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:57:24.599830 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:57:24.599841 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:57:24.599849 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:57:24.599860 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:57:24.599868 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Loaded network plugin cni"
I1229 06:57:24.599879 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:57:24.599886 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:57:24.599896 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:57:24.599907 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:57:24.599914 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:57:24.599922 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:57:24.599934 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.599953 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.599970 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.599983 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600000 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 dockerd[4014]: time="2025-12-29T06:53:50.654005689Z" level=info msg="ignoring event" container=fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600017 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600034 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": unexpected command output nsenter: cannot open /proc/5603/ns/net: No such file or directory\n with error: exit status 1"
I1229 06:57:24.600049 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.781948864Z" level=info msg="ignoring event" container=17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600063 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.786486841Z" level=info msg="ignoring event" container=1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600079 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.807329963Z" level=info msg="ignoring event" container=b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600092 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.833907949Z" level=info msg="ignoring event" container=6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600107 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.842344727Z" level=info msg="ignoring event" container=a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600121 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.846952655Z" level=info msg="ignoring event" container=4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600137 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.855675748Z" level=info msg="ignoring event" container=98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600152 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 dockerd[4014]: time="2025-12-29T06:53:52.089998903Z" level=info msg="ignoring event" container=a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600164 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a96e34d38f8c1eccbbdf73d99dbbbe353acea505d84b69f0fdd4e54cb811123/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600177 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d3819cc8ab802e5145e47325398f1da69b88a241482842040339b6b0d609a176/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600190 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fefef7c5591ea14974a99c19d99f86c4404e25de1b446a0cd0f0bcfffa63a991/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600207 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784\""
I1229 06:57:24.600223 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a7b1e961ded554edec9d882d7f1f6093e8446ab1020c81b638de16b76de139b0/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600235 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.150956960Z" level=error msg="collecting stats for container /k8s_etcd_etcd-functional-695625_kube-system_8da5c6c8980da2ca920a502b6f312384_1: invalid id: id is empty"
I1229 06:57:24.600247 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: 2025/12/29 06:53:58 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request.(*RespWriterWrapper).writeHeader (resp_writer_wrapper.go:83)
I1229 06:57:24.600261 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.741840545Z" level=info msg="ignoring event" container=d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600276 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:59Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/173054afc2f39262ebb1466d26d5d6144bb8704054c087da601130a01d9caaf1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600288 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: W1229 06:53:59.025412 4884 logging.go:59] [core] [Server #1] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
I1229 06:57:24.600304 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.600317 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:57:24.600331 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.600345 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:57:24.600357 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 dockerd[4014]: time="2025-12-29T06:56:00.626282205Z" level=info msg="ignoring event" container=78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600373 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 dockerd[4014]: time="2025-12-29T06:56:11.553142622Z" level=info msg="ignoring event" container=18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600386 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 dockerd[4014]: time="2025-12-29T06:56:32.448119389Z" level=info msg="ignoring event" container=0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600403 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="error getting RW layer size for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b': Error response from daemon: No such container: fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.600423 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b'"
I1229 06:57:24.600448 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.600472 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:57:24.600490 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 dockerd[4014]: time="2025-12-29T06:57:22.465508622Z" level=info msg="ignoring event" container=b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.619075 17440 logs.go:123] Gathering logs for describe nodes ...
I1229 06:57:24.619123 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1229 06:58:24.700496 17440 command_runner.go:130] ! Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
I1229 06:58:24.700542 17440 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (1m0.081407425s)
W1229 06:58:24.700578 17440 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
output:
** stderr **
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
** /stderr **
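The describe-nodes probe above fails with a server-side timeout after roughly one minute: the apiserver accepted the request but never returned. A minimal sketch for reproducing the same probe by hand, assuming shell access to the node (for example via minikube ssh -p functional-695625) and reusing the binary and kubeconfig paths shown in the log line above; the shorter client-side timeout value is an illustrative choice, not taken from this log:
  # re-run the same probe with an explicit client-side timeout instead of waiting out the 60s server default
  sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes \
    --kubeconfig=/var/lib/minikube/kubeconfig \
    --request-timeout=15s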
I1229 06:58:24.700591 17440 logs.go:123] Gathering logs for etcd [6b7711ee25a2] ...
I1229 06:58:24.700607 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 6b7711ee25a2"
I1229 06:58:24.726206 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.924768Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:58:24.726238 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.925193Z","caller":"etcdmain/config.go:270","msg":"--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7."}
I1229 06:58:24.726283 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925252Z","caller":"etcdmain/etcd.go:64","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.39.121:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--feature-gates=InitialCorruptCheck=true","--initial-advertise-peer-urls=https://192.168.39.121:2380","--initial-cluster=functional-695625=https://192.168.39.121:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.39.121:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.39.121:2380","--name=functional-695625","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--snapshot-count=10000","--trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--watch-progress-notify-interval=5s"]}
I1229 06:58:24.726296 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925487Z","caller":"etcdmain/etcd.go:107","msg":"server has already been initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
I1229 06:58:24.726311 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.925602Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:58:24.726321 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925710Z","caller":"embed/etcd.go:138","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.39.121:2380"]}
I1229 06:58:24.726342 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925810Z","caller":"embed/etcd.go:544","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:58:24.726358 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.934471Z","caller":"embed/etcd.go:146","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"]}
I1229 06:58:24.726438 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.935217Z","caller":"embed/etcd.go:323","msg":"starting an etcd server","etcd-version":"3.6.6","git-sha":"d2809cf","go-version":"go1.24.10","go-os":"linux","go-arch":"amd64","max-cpu-set":2,"max-cpu-available":2,"member-initialized":true,"name":"functional-695625","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"experimental-local-address":"","cors":["*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"feature-gates":"InitialCorruptCheck=true","initial-corrupt-check":false,"corrupt-check-time-interval":"0s","compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","discovery-token":"","discovery-endpoints":"","discovery-dial-timeout":"2s","discovery-request-timeout":"5s","discovery-keepalive-time":"2s","discovery-keepalive-timeout":"6s","discovery-insecure-transport":true,"discovery-insecure-skip-tls-verify":false,"discovery-cert":"","discovery-key":"","discovery-cacert":"","discovery-user":"","downgrade-check-interval":"5s","max-learners":1,"v2-deprecation":"write-only"}
I1229 06:58:24.726461 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.937503Z","logger":"bbolt","caller":"backend/backend.go:203","msg":"Opening db file (/var/lib/minikube/etcd/member/snap/db) with mode -rw------- and with options: {Timeout: 0s, NoGrowSync: false, NoFreelistSync: true, PreLoadFreelist: false, FreelistType: hashmap, ReadOnly: false, MmapFlags: 8000, InitialMmapSize: 10737418240, PageSize: 0, NoSync: false, OpenFile: 0x0, Mlock: false, Logger: 0xc000068080}"}
I1229 06:58:24.726472 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.940423Z","logger":"bbolt","caller":"bbolt@v1.4.3/db.go:321","msg":"Opening bbolt db (/var/lib/minikube/etcd/member/snap/db) successfully"}
I1229 06:58:24.726483 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.940850Z","caller":"storage/backend.go:80","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"3.479356ms"}
I1229 06:58:24.726492 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.941120Z","caller":"etcdserver/bootstrap.go:220","msg":"restore consistentIndex","index":499}
I1229 06:58:24.726503 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945006Z","caller":"etcdserver/bootstrap.go:441","msg":"No snapshot found. Recovering WAL from scratch!"}
I1229 06:58:24.726517 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945707Z","caller":"etcdserver/bootstrap.go:232","msg":"recovered v3 backend","backend-size-bytes":1019904,"backend-size":"1.0 MB","backend-size-in-use-bytes":999424,"backend-size-in-use":"999 kB"}
I1229 06:58:24.726528 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945966Z","caller":"etcdserver/bootstrap.go:90","msg":"Bootstrapping WAL from snapshot"}
I1229 06:58:24.726540 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.951906Z","caller":"etcdserver/bootstrap.go:599","msg":"restarting local member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","commit-index":499}
I1229 06:58:24.726552 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952063Z","caller":"etcdserver/bootstrap.go:94","msg":"bootstrapping cluster"}
I1229 06:58:24.726560 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952160Z","caller":"etcdserver/bootstrap.go:101","msg":"bootstrapping storage"}
I1229 06:58:24.726577 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952338Z","caller":"membership/cluster.go:605","msg":"Detected member only in v3store but missing in v2store","member":"{ID:cbdf275f553df7c2 RaftAttributes:{PeerURLs:[https://192.168.39.121:2380] IsLearner:false} Attributes:{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}}"}
I1229 06:58:24.726590 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952385Z","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.6"}
I1229 06:58:24.726607 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952396Z","caller":"membership/cluster.go:297","msg":"recovered/added member from store","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","recovered-remote-peer-id":"cbdf275f553df7c2","recovered-remote-peer-urls":["https://192.168.39.121:2380"],"recovered-remote-peer-is-learner":false}
I1229 06:58:24.726618 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952406Z","caller":"membership/cluster.go:307","msg":"set cluster version from store","cluster-version":"3.6"}
I1229 06:58:24.726629 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952416Z","caller":"etcdserver/bootstrap.go:109","msg":"bootstrapping raft"}
I1229 06:58:24.726636 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952460Z","caller":"etcdserver/server.go:312","msg":"bootstrap successfully"}
I1229 06:58:24.726647 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952535Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=()"}
I1229 06:58:24.726657 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952604Z","logger":"raft","caller":"v3@v3.6.0/raft.go:897","msg":"cbdf275f553df7c2 became follower at term 3"}
I1229 06:58:24.726670 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952619Z","logger":"raft","caller":"v3@v3.6.0/raft.go:493","msg":"newRaft cbdf275f553df7c2 [peers: [], term: 3, commit: 499, applied: 0, lastindex: 499, lastterm: 3]"}
I1229 06:58:24.726680 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.955095Z","caller":"auth/store.go:1135","msg":"simple token is not cryptographically signed"}
I1229 06:58:24.726698 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.961356Z","caller":"mvcc/kvstore.go:408","msg":"kvstore restored","current-rev":465}
I1229 06:58:24.726711 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.967658Z","caller":"storage/quota.go:93","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
I1229 06:58:24.726723 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.968487Z","caller":"etcdserver/corrupt.go:91","msg":"starting initial corruption check","local-member-id":"cbdf275f553df7c2","timeout":"7s"}
I1229 06:58:24.726735 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969020Z","caller":"etcdserver/corrupt.go:172","msg":"initial corruption checking passed; no corruption","local-member-id":"cbdf275f553df7c2"}
I1229 06:58:24.726750 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969260Z","caller":"etcdserver/server.go:589","msg":"starting etcd server","local-member-id":"cbdf275f553df7c2","local-server-version":"3.6.6","cluster-id":"6f38b6947d3f1f22","cluster-version":"3.6"}
I1229 06:58:24.726765 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969708Z","caller":"etcdserver/server.go:483","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"cbdf275f553df7c2","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
I1229 06:58:24.726784 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970043Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:58:24.726826 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970828Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cbdf275f553df7c2","initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
I1229 06:58:24.726839 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971046Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
I1229 06:58:24.726848 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970057Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:58:24.726858 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971258Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.121:2380"}
I1229 06:58:24.726870 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970152Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
I1229 06:58:24.726883 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971336Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
I1229 06:58:24.726896 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971370Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
I1229 06:58:24.726906 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970393Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=(14690503799911348162)"}
I1229 06:58:24.726922 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.972410Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","added-peer-id":"cbdf275f553df7c2","added-peer-peer-urls":["https://192.168.39.121:2380"],"added-peer-is-learner":false}
I1229 06:58:24.726935 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.972698Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","from":"3.6","to":"3.6"}
I1229 06:58:24.726947 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353814Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cbdf275f553df7c2 is starting a new election at term 3"}
I1229 06:58:24.726956 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353865Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cbdf275f553df7c2 became pre-candidate at term 3"}
I1229 06:58:24.726969 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353992Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgPreVoteResp from cbdf275f553df7c2 at term 3"}
I1229 06:58:24.726982 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.354007Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
I1229 06:58:24.726997 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.354031Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cbdf275f553df7c2 became candidate at term 4"}
I1229 06:58:24.727009 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355853Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgVoteResp from cbdf275f553df7c2 at term 4"}
I1229 06:58:24.727020 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355920Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgVoteResp votes and 0 vote rejections"}
I1229 06:58:24.727029 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355940Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cbdf275f553df7c2 became leader at term 4"}
I1229 06:58:24.727039 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.356018Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cbdf275f553df7c2 elected leader cbdf275f553df7c2 at term 4"}
I1229 06:58:24.727056 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358237Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cbdf275f553df7c2","local-member-attributes":"{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}","cluster-id":"6f38b6947d3f1f22","publish-timeout":"7s"}
I1229 06:58:24.727064 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358323Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:58:24.727072 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358268Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:58:24.727081 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358820Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
I1229 06:58:24.727089 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358859Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
I1229 06:58:24.727100 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.360417Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:58:24.727109 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.360952Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:58:24.727120 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.363710Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
I1229 06:58:24.727132 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.363760Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.121:2379"}
I1229 06:58:24.733042 17440 logs.go:123] Gathering logs for kube-scheduler [a79d99ad3fde] ...
I1229 06:58:24.733064 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a79d99ad3fde"
I1229 06:58:24.755028 17440 command_runner.go:130] ! I1229 06:53:51.269699 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:58:24.755231 17440 logs.go:123] Gathering logs for kube-proxy [8911777281f4] ...
I1229 06:58:24.755256 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 8911777281f4"
I1229 06:58:24.776073 17440 command_runner.go:130] ! I1229 06:52:47.703648 1 server_linux.go:53] "Using iptables proxy"
I1229 06:58:24.776109 17440 command_runner.go:130] ! I1229 06:52:47.791676 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:58:24.776120 17440 command_runner.go:130] ! I1229 06:52:47.897173 1 shared_informer.go:377] "Caches are synced"
I1229 06:58:24.776135 17440 command_runner.go:130] ! I1229 06:52:47.900073 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.39.121"]
I1229 06:58:24.776154 17440 command_runner.go:130] ! E1229 06:52:47.906310 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1229 06:58:24.776162 17440 command_runner.go:130] ! I1229 06:52:48.206121 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
I1229 06:58:24.776180 17440 command_runner.go:130] ! error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
I1229 06:58:24.776188 17440 command_runner.go:130] ! Perhaps ip6tables or your kernel needs to be upgraded.
I1229 06:58:24.776195 17440 command_runner.go:130] ! >
I1229 06:58:24.776212 17440 command_runner.go:130] ! I1229 06:52:48.209509 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1229 06:58:24.776224 17440 command_runner.go:130] ! I1229 06:52:48.210145 1 server_linux.go:136] "Using iptables Proxier"
I1229 06:58:24.776249 17440 command_runner.go:130] ! I1229 06:52:48.253805 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1229 06:58:24.776257 17440 command_runner.go:130] ! I1229 06:52:48.255046 1 server.go:529] "Version info" version="v1.35.0"
I1229 06:58:24.776266 17440 command_runner.go:130] ! I1229 06:52:48.255076 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:58:24.776282 17440 command_runner.go:130] ! I1229 06:52:48.262205 1 config.go:200] "Starting service config controller"
I1229 06:58:24.776296 17440 command_runner.go:130] ! I1229 06:52:48.262238 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1229 06:58:24.776307 17440 command_runner.go:130] ! I1229 06:52:48.262258 1 config.go:106] "Starting endpoint slice config controller"
I1229 06:58:24.776328 17440 command_runner.go:130] ! I1229 06:52:48.262261 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1229 06:58:24.776350 17440 command_runner.go:130] ! I1229 06:52:48.262278 1 config.go:403] "Starting serviceCIDR config controller"
I1229 06:58:24.776366 17440 command_runner.go:130] ! I1229 06:52:48.262282 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1229 06:58:24.776376 17440 command_runner.go:130] ! I1229 06:52:48.270608 1 config.go:309] "Starting node config controller"
I1229 06:58:24.776388 17440 command_runner.go:130] ! I1229 06:52:48.271311 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1229 06:58:24.776404 17440 command_runner.go:130] ! I1229 06:52:48.271337 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1229 06:58:24.776420 17440 command_runner.go:130] ! I1229 06:52:48.363324 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1229 06:58:24.776439 17440 command_runner.go:130] ! I1229 06:52:48.363427 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1229 06:58:24.776453 17440 command_runner.go:130] ! I1229 06:52:48.363671 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1229 06:58:24.778558 17440 logs.go:123] Gathering logs for dmesg ...
I1229 06:58:24.778595 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1229 06:58:24.793983 17440 command_runner.go:130] > [Dec29 06:52] Booted with the nomodeset parameter. Only the system framebuffer will be available
I1229 06:58:24.794025 17440 command_runner.go:130] > [ +0.000007] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
I1229 06:58:24.794040 17440 command_runner.go:130] > [ +0.000088] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
I1229 06:58:24.794054 17440 command_runner.go:130] > [ +0.005634] (rpcbind)[119]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
I1229 06:58:24.794069 17440 command_runner.go:130] > [ +0.202886] crun[405]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set
I1229 06:58:24.794079 17440 command_runner.go:130] > [ +0.971059] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
I1229 06:58:24.794096 17440 command_runner.go:130] > [ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
I1229 06:58:24.794106 17440 command_runner.go:130] > [ +0.000001] NFSD: Unable to initialize client recovery tracking! (-2)
I1229 06:58:24.794117 17440 command_runner.go:130] > [ +0.268875] kauditd_printk_skb: 32 callbacks suppressed
I1229 06:58:24.794125 17440 command_runner.go:130] > [ +0.123569] kauditd_printk_skb: 342 callbacks suppressed
I1229 06:58:24.794136 17440 command_runner.go:130] > [ +0.099711] kauditd_printk_skb: 205 callbacks suppressed
I1229 06:58:24.794146 17440 command_runner.go:130] > [ +0.170782] kauditd_printk_skb: 221 callbacks suppressed
I1229 06:58:24.794160 17440 command_runner.go:130] > [ +0.199839] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:58:24.794167 17440 command_runner.go:130] > [ +0.025660] kauditd_printk_skb: 318 callbacks suppressed
I1229 06:58:24.794178 17440 command_runner.go:130] > [Dec29 06:53] kauditd_printk_skb: 19 callbacks suppressed
I1229 06:58:24.794186 17440 command_runner.go:130] > [ +15.204939] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:58:24.794196 17440 command_runner.go:130] > [ +0.333829] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:58:24.794207 17440 command_runner.go:130] > [ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:58:24.794215 17440 command_runner.go:130] > [ +4.976278] kauditd_printk_skb: 42 callbacks suppressed
I1229 06:58:24.794221 17440 command_runner.go:130] > [ +0.830497] kauditd_printk_skb: 396 callbacks suppressed
I1229 06:58:24.794229 17440 command_runner.go:130] > [ +5.294312] kauditd_printk_skb: 231 callbacks suppressed
I1229 06:58:24.794241 17440 command_runner.go:130] > [Dec29 06:56] kauditd_printk_skb: 36 callbacks suppressed
I1229 06:58:24.794252 17440 command_runner.go:130] > [ +10.952068] kauditd_printk_skb: 66 callbacks suppressed
I1229 06:58:24.794260 17440 command_runner.go:130] > [ +20.880271] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:24.794271 17440 command_runner.go:130] > [Dec29 06:57] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:24.795355 17440 logs.go:123] Gathering logs for kube-scheduler [4d49952084c9] ...
I1229 06:58:24.795387 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 4d49952084c9"
I1229 06:58:24.820602 17440 command_runner.go:130] ! I1229 06:53:52.882050 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:58:24.820635 17440 command_runner.go:130] ! W1229 06:54:52.896472 1 authentication.go:397] Error looking up in-cluster authentication configuration: the server was unable to return a response in the time allotted, but may still be processing the request (get configmaps extension-apiserver-authentication)
I1229 06:58:24.820646 17440 command_runner.go:130] ! W1229 06:54:52.896499 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
I1229 06:58:24.820657 17440 command_runner.go:130] ! W1229 06:54:52.896506 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1229 06:58:24.820665 17440 command_runner.go:130] ! I1229 06:54:52.913597 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.35.0"
I1229 06:58:24.820672 17440 command_runner.go:130] ! I1229 06:54:52.913622 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:58:24.820681 17440 command_runner.go:130] ! I1229 06:54:52.915784 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1229 06:58:24.820692 17440 command_runner.go:130] ! I1229 06:54:52.915816 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:58:24.820698 17440 command_runner.go:130] ! I1229 06:54:52.915823 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1229 06:58:24.820705 17440 command_runner.go:130] ! I1229 06:54:52.915940 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:58:24.822450 17440 logs.go:123] Gathering logs for kube-controller-manager [17fe16a2822a] ...
I1229 06:58:24.822473 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 17fe16a2822a"
I1229 06:58:24.844122 17440 command_runner.go:130] ! I1229 06:53:51.283329 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:58:24.844156 17440 command_runner.go:130] ! I1229 06:53:51.303666 1 controllermanager.go:189] "Starting" version="v1.35.0"
I1229 06:58:24.844170 17440 command_runner.go:130] ! I1229 06:53:51.303706 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:58:24.844184 17440 command_runner.go:130] ! I1229 06:53:51.307865 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1229 06:58:24.844201 17440 command_runner.go:130] ! I1229 06:53:51.308287 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:58:24.844210 17440 command_runner.go:130] ! I1229 06:53:51.309479 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1229 06:58:24.844218 17440 command_runner.go:130] ! I1229 06:53:51.309545 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:58:24.845429 17440 logs.go:123] Gathering logs for storage-provisioner [bd96b57aa9fc] ...
I1229 06:58:24.845453 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 bd96b57aa9fc"
I1229 06:58:24.867566 17440 command_runner.go:130] ! I1229 06:52:48.539098 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1229 06:58:24.867597 17440 command_runner.go:130] ! I1229 06:52:48.550309 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1229 06:58:24.867607 17440 command_runner.go:130] ! I1229 06:52:48.550373 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1229 06:58:24.867615 17440 command_runner.go:130] ! W1229 06:52:48.552935 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867622 17440 command_runner.go:130] ! W1229 06:52:48.563735 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867633 17440 command_runner.go:130] ! I1229 06:52:48.564362 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1229 06:58:24.867653 17440 command_runner.go:130] ! I1229 06:52:48.565422 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:58:24.867681 17440 command_runner.go:130] ! I1229 06:52:48.565143 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"cfb65e16-c2f7-4c19-a059-8ef64f8f3f2e", APIVersion:"v1", ResourceVersion:"388", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868 became leader
I1229 06:58:24.867694 17440 command_runner.go:130] ! W1229 06:52:48.576668 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867704 17440 command_runner.go:130] ! W1229 06:52:48.582743 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867719 17440 command_runner.go:130] ! I1229 06:52:48.665711 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:58:24.867734 17440 command_runner.go:130] ! W1229 06:52:50.588204 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867750 17440 command_runner.go:130] ! W1229 06:52:50.593815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867763 17440 command_runner.go:130] ! W1229 06:52:52.597431 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867817 17440 command_runner.go:130] ! W1229 06:52:52.602815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867836 17440 command_runner.go:130] ! W1229 06:52:54.606663 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867848 17440 command_runner.go:130] ! W1229 06:52:54.612650 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867859 17440 command_runner.go:130] ! W1229 06:52:56.616395 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867871 17440 command_runner.go:130] ! W1229 06:52:56.622404 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867883 17440 command_runner.go:130] ! W1229 06:52:58.626804 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867891 17440 command_runner.go:130] ! W1229 06:52:58.637257 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867901 17440 command_runner.go:130] ! W1229 06:53:00.640728 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867914 17440 command_runner.go:130] ! W1229 06:53:00.646446 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867926 17440 command_runner.go:130] ! W1229 06:53:02.650659 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867944 17440 command_runner.go:130] ! W1229 06:53:02.660374 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867956 17440 command_runner.go:130] ! W1229 06:53:04.664091 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867972 17440 command_runner.go:130] ! W1229 06:53:04.669806 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867982 17440 command_runner.go:130] ! W1229 06:53:06.674203 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867997 17440 command_runner.go:130] ! W1229 06:53:06.680002 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868013 17440 command_runner.go:130] ! W1229 06:53:08.683483 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868028 17440 command_runner.go:130] ! W1229 06:53:08.688934 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868048 17440 command_runner.go:130] ! W1229 06:53:10.693644 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868063 17440 command_runner.go:130] ! W1229 06:53:10.706122 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868071 17440 command_runner.go:130] ! W1229 06:53:12.709949 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868081 17440 command_runner.go:130] ! W1229 06:53:12.715753 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868098 17440 command_runner.go:130] ! W1229 06:53:14.719191 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868111 17440 command_runner.go:130] ! W1229 06:53:14.728100 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868127 17440 command_runner.go:130] ! W1229 06:53:16.731658 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868140 17440 command_runner.go:130] ! W1229 06:53:16.737463 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868153 17440 command_runner.go:130] ! W1229 06:53:18.741304 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868164 17440 command_runner.go:130] ! W1229 06:53:18.746708 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868177 17440 command_runner.go:130] ! W1229 06:53:20.749662 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868192 17440 command_runner.go:130] ! W1229 06:53:20.755989 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868207 17440 command_runner.go:130] ! W1229 06:53:22.760815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868221 17440 command_runner.go:130] ! W1229 06:53:22.772421 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868236 17440 command_runner.go:130] ! W1229 06:53:24.776403 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868247 17440 command_runner.go:130] ! W1229 06:53:24.783232 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868258 17440 command_runner.go:130] ! W1229 06:53:26.786665 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868275 17440 command_runner.go:130] ! W1229 06:53:26.792239 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868290 17440 command_runner.go:130] ! W1229 06:53:28.796420 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868304 17440 command_runner.go:130] ! W1229 06:53:28.805511 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868320 17440 command_runner.go:130] ! W1229 06:53:30.808544 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868332 17440 command_runner.go:130] ! W1229 06:53:30.816066 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868342 17440 command_runner.go:130] ! W1229 06:53:32.820090 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868358 17440 command_runner.go:130] ! W1229 06:53:32.826208 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868373 17440 command_runner.go:130] ! W1229 06:53:34.829865 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868385 17440 command_runner.go:130] ! W1229 06:53:34.835774 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868400 17440 command_runner.go:130] ! W1229 06:53:36.839291 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868414 17440 command_runner.go:130] ! W1229 06:53:36.853251 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868425 17440 command_runner.go:130] ! W1229 06:53:38.856432 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868438 17440 command_runner.go:130] ! W1229 06:53:38.862360 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.872821 17440 out.go:374] Setting ErrFile to fd 2...
I1229 06:58:24.872842 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
W1229 06:58:24.872901 17440 out.go:285] X Problems detected in kube-apiserver [b206d555ad19]:
X Problems detected in kube-apiserver [b206d555ad19]:
W1229 06:58:24.872915 17440 out.go:285] E1229 06:57:22.441956 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
E1229 06:57:22.441956 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
I1229 06:58:24.872919 17440 out.go:374] Setting ErrFile to fd 2...
I1229 06:58:24.872923 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
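The problem flagged above is the kube-apiserver container [b206d555ad19] exiting because it cannot bind 0.0.0.0:8441 (address already in use), which also explains the healthz timeouts that follow. A hedged sketch for confirming which process still holds the port, assuming shell access to the node; ss and netstat are standard Linux tools and are not part of this log:
  # list the listener currently bound to the apiserver port
  sudo ss -ltnp | grep ':8441'
  # fallback if ss is not available in the image
  sudo netstat -ltnp | grep ':8441'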
I1229 06:58:34.875381 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:58:39.877679 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:58:39.877779 17440 kubeadm.go:602] duration metric: took 4m48.388076341s to restartPrimaryControlPlane
W1229 06:58:39.877879 17440 out.go:285] ! Unable to restart control-plane node(s), will reset cluster: <no value>
! Unable to restart control-plane node(s), will reset cluster: <no value>
I1229 06:58:39.877946 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm reset --cri-socket /var/run/cri-dockerd.sock --force"
I1229 06:58:39.930050 17440 command_runner.go:130] ! W1229 06:58:39.921577 8187 resetconfiguration.go:53] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-dockerd.sock". Please update your configuration!
I1229 06:58:49.935089 17440 command_runner.go:130] ! W1229 06:58:49.926653 8187 reset.go:141] [reset] Unable to fetch the kubeadm-config ConfigMap from cluster: failed to get config map: Get "https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/configmaps/kubeadm-config?timeout=10s": net/http: request canceled (Client.Timeout exceeded while awaiting headers)
I1229 06:58:49.935131 17440 command_runner.go:130] ! W1229 06:58:49.926754 8187 removeetcdmember.go:105] [reset] No kubeadm config, using etcd pod spec to get data directory
I1229 06:58:50.998307 17440 command_runner.go:130] > [reset] Reading configuration from the "kubeadm-config" ConfigMap in namespace "kube-system"...
I1229 06:58:50.998341 17440 command_runner.go:130] > [reset] Use 'kubeadm init phase upload-config kubeadm --config your-config-file' to re-upload it.
I1229 06:58:50.998348 17440 command_runner.go:130] > [preflight] Running pre-flight checks
I1229 06:58:50.998357 17440 command_runner.go:130] > [reset] Deleted contents of the etcd data directory: /var/lib/minikube/etcd
I1229 06:58:50.998366 17440 command_runner.go:130] > [reset] Stopping the kubelet service
I1229 06:58:50.998372 17440 command_runner.go:130] > [reset] Unmounting mounted directories in "/var/lib/kubelet"
I1229 06:58:50.998386 17440 command_runner.go:130] > [reset] Deleting contents of directories: [/etc/kubernetes/manifests /var/lib/kubelet /etc/kubernetes/pki]
I1229 06:58:50.998407 17440 command_runner.go:130] > [reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
I1229 06:58:50.998417 17440 command_runner.go:130] > The reset process does not perform cleanup of CNI plugin configuration,
I1229 06:58:50.998428 17440 command_runner.go:130] > network filtering rules and kubeconfig files.
I1229 06:58:50.998434 17440 command_runner.go:130] > For information on how to perform this cleanup manually, please see:
I1229 06:58:50.998442 17440 command_runner.go:130] > https://k8s.io/docs/reference/setup-tools/kubeadm/kubeadm-reset/
I1229 06:58:50.998458 17440 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm reset --cri-socket /var/run/cri-dockerd.sock --force": (11.120499642s)
I1229 06:58:50.998527 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1229 06:58:51.015635 17440 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1229 06:58:51.028198 17440 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1229 06:58:51.040741 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
I1229 06:58:51.040780 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
I1229 06:58:51.040811 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
I1229 06:58:51.040826 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1229 06:58:51.040865 17440 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1229 06:58:51.040877 17440 kubeadm.go:158] found existing configuration files:
I1229 06:58:51.040925 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I1229 06:58:51.051673 17440 command_runner.go:130] ! grep: /etc/kubernetes/admin.conf: No such file or directory
I1229 06:58:51.052090 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1229 06:58:51.052155 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1229 06:58:51.064755 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I1229 06:58:51.076455 17440 command_runner.go:130] ! grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1229 06:58:51.076517 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1229 06:58:51.076577 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1229 06:58:51.088881 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I1229 06:58:51.099253 17440 command_runner.go:130] ! grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1229 06:58:51.099652 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1229 06:58:51.099710 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1229 06:58:51.111487 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I1229 06:58:51.122532 17440 command_runner.go:130] ! grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1229 06:58:51.122905 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1229 06:58:51.122972 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
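The four grep/rm pairs above are the stale-kubeconfig check: each /etc/kubernetes/*.conf file that does not reference https://control-plane.minikube.internal:8441 is removed before kubeadm init runs. A minimal shell sketch of that cleanup, using only the endpoint and file names visible in the log (illustrative, not a verbatim minikube snippet):

    # Sketch only: replicate the stale-config cleanup logged above.
    endpoint="https://control-plane.minikube.internal:8441"
    for f in admin.conf kubelet.conf controller-manager.conf scheduler.conf; do
      # Drop the file when it does not mention the expected control-plane endpoint.
      sudo grep -q "$endpoint" "/etc/kubernetes/$f" || sudo rm -f "/etc/kubernetes/$f"
    done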
I1229 06:58:51.135143 17440 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem"
I1229 06:58:51.355420 17440 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1229 06:58:51.355450 17440 command_runner.go:130] ! [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1229 06:58:51.355543 17440 kubeadm.go:319] [preflight] Some fatal errors occurred:
I1229 06:58:51.355556 17440 command_runner.go:130] ! [preflight] Some fatal errors occurred:
I1229 06:58:51.355615 17440 kubeadm.go:319] [ERROR Port-8441]: Port 8441 is in use
I1229 06:58:51.355625 17440 command_runner.go:130] ! [ERROR Port-8441]: Port 8441 is in use
I1229 06:58:51.355790 17440 kubeadm.go:319] [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
I1229 06:58:51.355837 17440 command_runner.go:130] ! [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
I1229 06:58:51.356251 17440 kubeadm.go:319] error: error execution phase preflight: preflight checks failed
I1229 06:58:51.356265 17440 command_runner.go:130] ! error: error execution phase preflight: preflight checks failed
I1229 06:58:51.356317 17440 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
I1229 06:58:51.356324 17440 command_runner.go:130] ! To see the stack trace of this error execute with --v=5 or higher
I1229 06:58:51.357454 17440 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0
I1229 06:58:51.357471 17440 command_runner.go:130] > [init] Using Kubernetes version: v1.35.0
I1229 06:58:51.357544 17440 kubeadm.go:319] [preflight] Running pre-flight checks
I1229 06:58:51.357561 17440 command_runner.go:130] > [preflight] Running pre-flight checks
W1229 06:58:51.357680 17440 out.go:285] ! initialization failed, will try again: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
stderr:
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Some fatal errors occurred:
[ERROR Port-8441]: Port 8441 is in use
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
error: error execution phase preflight: preflight checks failed
To see the stack trace of this error execute with --v=5 or higher
I1229 06:58:51.357753 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm reset --cri-socket /var/run/cri-dockerd.sock --force"
I1229 06:58:51.401004 17440 command_runner.go:130] > [preflight] Running pre-flight checks
I1229 06:58:51.401036 17440 command_runner.go:130] > [reset] Deleted contents of the etcd data directory: /var/lib/etcd
I1229 06:58:51.401047 17440 command_runner.go:130] > [reset] Stopping the kubelet service
I1229 06:58:51.408535 17440 command_runner.go:130] > [reset] Unmounting mounted directories in "/var/lib/kubelet"
I1229 06:58:51.413813 17440 command_runner.go:130] > [reset] Deleting contents of directories: [/etc/kubernetes/manifests /var/lib/kubelet /etc/kubernetes/pki]
I1229 06:58:51.415092 17440 command_runner.go:130] > [reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
I1229 06:58:51.415117 17440 command_runner.go:130] > The reset process does not perform cleanup of CNI plugin configuration,
I1229 06:58:51.415128 17440 command_runner.go:130] > network filtering rules and kubeconfig files.
I1229 06:58:51.415137 17440 command_runner.go:130] > For information on how to perform this cleanup manually, please see:
I1229 06:58:51.415145 17440 command_runner.go:130] > https://k8s.io/docs/reference/setup-tools/kubeadm/kubeadm-reset/
I1229 06:58:51.415645 17440 command_runner.go:130] ! W1229 06:58:51.391426 8625 resetconfiguration.go:53] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-dockerd.sock". Please update your configuration!
I1229 06:58:51.415670 17440 command_runner.go:130] ! W1229 06:58:51.392518 8625 removeetcdmember.go:105] [reset] No kubeadm config, using etcd pod spec to get data directory
I1229 06:58:51.415739 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1229 06:58:51.432316 17440 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1229 06:58:51.444836 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
I1229 06:58:51.444860 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
I1229 06:58:51.444867 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
I1229 06:58:51.444874 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1229 06:58:51.445417 17440 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1229 06:58:51.445435 17440 kubeadm.go:158] found existing configuration files:
I1229 06:58:51.445485 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I1229 06:58:51.457038 17440 command_runner.go:130] ! grep: /etc/kubernetes/admin.conf: No such file or directory
I1229 06:58:51.457099 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1229 06:58:51.457146 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1229 06:58:51.469980 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I1229 06:58:51.480965 17440 command_runner.go:130] ! grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1229 06:58:51.481435 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1229 06:58:51.481498 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1229 06:58:51.493408 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I1229 06:58:51.504342 17440 command_runner.go:130] ! grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1229 06:58:51.504404 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1229 06:58:51.504468 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1229 06:58:51.516567 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I1229 06:58:51.526975 17440 command_runner.go:130] ! grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1229 06:58:51.527475 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1229 06:58:51.527532 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1229 06:58:51.539365 17440 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem"
I1229 06:58:51.587038 17440 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0
I1229 06:58:51.587068 17440 command_runner.go:130] > [init] Using Kubernetes version: v1.35.0
I1229 06:58:51.587108 17440 kubeadm.go:319] [preflight] Running pre-flight checks
I1229 06:58:51.587113 17440 command_runner.go:130] > [preflight] Running pre-flight checks
I1229 06:58:51.738880 17440 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1229 06:58:51.738912 17440 command_runner.go:130] ! [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1229 06:58:51.738963 17440 kubeadm.go:319] [preflight] Some fatal errors occurred:
I1229 06:58:51.738975 17440 command_runner.go:130] ! [preflight] Some fatal errors occurred:
I1229 06:58:51.739029 17440 kubeadm.go:319] [ERROR Port-8441]: Port 8441 is in use
I1229 06:58:51.739038 17440 command_runner.go:130] ! [ERROR Port-8441]: Port 8441 is in use
I1229 06:58:51.739157 17440 kubeadm.go:319] [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
I1229 06:58:51.739166 17440 command_runner.go:130] ! [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
I1229 06:58:51.739271 17440 kubeadm.go:319] error: error execution phase preflight: preflight checks failed
I1229 06:58:51.739294 17440 command_runner.go:130] ! error: error execution phase preflight: preflight checks failed
I1229 06:58:51.739348 17440 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
I1229 06:58:51.739355 17440 command_runner.go:130] ! To see the stack trace of this error execute with --v=5 or higher
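Both kubeadm init attempts above fail the same preflight check, [ERROR Port-8441]: Port 8441 is in use, i.e. something (here, the old kube-apiserver reported earlier) is still bound to the apiserver port. A hedged diagnostic sketch; these commands were not run by the test, and whether ss or lsof is present in the guest image is an assumption:

    # Sketch only: identify the process still listening on the apiserver port inside the VM.
    minikube -p functional-695625 ssh -- sudo ss -ltnp 'sport = :8441'
    # Alternative, if lsof is available in the guest:
    minikube -p functional-695625 ssh -- sudo lsof -iTCP:8441 -sTCP:LISTEN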
I1229 06:58:51.739406 17440 kubeadm.go:403] duration metric: took 5m0.289116828s to StartCluster
I1229 06:58:51.739455 17440 cri.go:61] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I1229 06:58:51.739507 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-apiserver
I1229 06:58:51.776396 17440 cri.go:96] found id: ""
I1229 06:58:51.776420 17440 logs.go:282] 0 containers: []
W1229 06:58:51.776428 17440 logs.go:284] No container was found matching "kube-apiserver"
I1229 06:58:51.776434 17440 cri.go:61] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I1229 06:58:51.776522 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=etcd
I1229 06:58:51.808533 17440 cri.go:96] found id: ""
I1229 06:58:51.808556 17440 logs.go:282] 0 containers: []
W1229 06:58:51.808563 17440 logs.go:284] No container was found matching "etcd"
I1229 06:58:51.808570 17440 cri.go:61] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I1229 06:58:51.808625 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=coredns
I1229 06:58:51.841860 17440 cri.go:96] found id: ""
I1229 06:58:51.841887 17440 logs.go:282] 0 containers: []
W1229 06:58:51.841894 17440 logs.go:284] No container was found matching "coredns"
I1229 06:58:51.841900 17440 cri.go:61] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I1229 06:58:51.841955 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-scheduler
I1229 06:58:51.875485 17440 cri.go:96] found id: ""
I1229 06:58:51.875512 17440 logs.go:282] 0 containers: []
W1229 06:58:51.875520 17440 logs.go:284] No container was found matching "kube-scheduler"
I1229 06:58:51.875526 17440 cri.go:61] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I1229 06:58:51.875576 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-proxy
I1229 06:58:51.909661 17440 cri.go:96] found id: ""
I1229 06:58:51.909699 17440 logs.go:282] 0 containers: []
W1229 06:58:51.909712 17440 logs.go:284] No container was found matching "kube-proxy"
I1229 06:58:51.909720 17440 cri.go:61] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I1229 06:58:51.909790 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-controller-manager
I1229 06:58:51.943557 17440 cri.go:96] found id: ""
I1229 06:58:51.943594 17440 logs.go:282] 0 containers: []
W1229 06:58:51.943607 17440 logs.go:284] No container was found matching "kube-controller-manager"
I1229 06:58:51.943616 17440 cri.go:61] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I1229 06:58:51.943685 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kindnet
I1229 06:58:51.979189 17440 cri.go:96] found id: ""
I1229 06:58:51.979219 17440 logs.go:282] 0 containers: []
W1229 06:58:51.979228 17440 logs.go:284] No container was found matching "kindnet"
I1229 06:58:51.979234 17440 cri.go:61] listing CRI containers in root : {State:all Name:storage-provisioner Namespaces:[]}
I1229 06:58:51.979285 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=storage-provisioner
I1229 06:58:52.013436 17440 cri.go:96] found id: ""
I1229 06:58:52.013472 17440 logs.go:282] 0 containers: []
W1229 06:58:52.013482 17440 logs.go:284] No container was found matching "storage-provisioner"
I1229 06:58:52.013494 17440 logs.go:123] Gathering logs for kubelet ...
I1229 06:58:52.013507 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1229 06:58:52.030384 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.141703 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:58:52.030429 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.164789 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:58:52.030454 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.190793 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:58:52.030481 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202029 2634 kuberuntime_manager.go:1961] "PodSandboxStatus of sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd" pod="kube-system/etcd-functional-695625"
I1229 06:58:52.030506 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202077 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:58:52.030530 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202095 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:58:52.030550 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202348 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:58:52.030574 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202382 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:58:52.030601 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202394 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:58:52.030643 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202436 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-proxy-g7lp9" podUID="9c2c2ac1-7fa0-427d-b78e-ee14e169895a"
I1229 06:58:52.030670 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202695 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:58:52.030694 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202717 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"}
I1229 06:58:52.030721 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202737 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:58:52.030757 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202753 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/storage-provisioner" podUID="b5f201ca-6d54-4e15-9584-396fb1486f3c"
I1229 06:58:52.030787 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202781 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:58:52.030826 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202797 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:58:52.030853 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202829 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:58:52.030893 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203153 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.030921 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203201 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:58:52.030943 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203215 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"}
I1229 06:58:52.030981 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203229 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:58:52.031015 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203240 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:58:52.031053 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.205108 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer"
I1229 06:58:52.031087 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205291 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"ebc0dd45a3bf1e20d1e524935fd6129c\"}"
I1229 06:58:52.031117 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205358 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="ebc0dd45a3bf1e20d1e524935fd6129c"
I1229 06:58:52.031146 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205374 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:58:52.031189 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205391 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:58:52.031223 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205442 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"5079d003096e0cf8214852718da6832c\"}"
I1229 06:58:52.031253 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205472 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:58:52.031281 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205487 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.031311 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205502 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.031347 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.306369 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:58:52.031383 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.465709 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:58:52.031422 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.727775 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:58:52.031445 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.224724 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:58:52.031467 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.225054 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.031491 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.239349 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:58:52.031516 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.239613 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.031538 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.260924 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:58:52.031562 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.262706 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.031584 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.271403 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:58:52.031606 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.272071 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.031628 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.486082 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.031651 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.527267 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.031673 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.585714 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.031695 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.682419 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:58:52.031717 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 kubelet[2634]: E1229 06:53:51.994421 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.031738 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.009282 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.031763 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.028514 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.031786 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: I1229 06:53:52.059063 2634 scope.go:122] "RemoveContainer" containerID="4b032678478a0db80f17dd1d989d5d3ad03f5c19d261d887ee8bbc80c0ef716c"
I1229 06:58:52.031824 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.061268 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.031855 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.136206 2634 controller.go:251] "Failed to update lease" err="Put \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:58:52.031894 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.348866 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:58:52.031949 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.420977 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.031981 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.083455 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:58:52.032005 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099631 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:58:52.032025 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099665 2634 scope.go:122] "RemoveContainer" containerID="14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697"
I1229 06:58:52.032048 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.099823 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.032069 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.114949 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:58:52.032093 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115125 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.032112 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.115147 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:58:52.032150 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115570 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:58:52.032170 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128002 2634 scope.go:122] "RemoveContainer" containerID="abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32"
I1229 06:58:52.032192 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128620 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:58:52.032214 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.128846 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.032234 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128862 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:58:52.032269 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.129184 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.032290 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.146245 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:58:52.032314 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.149274 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:58:52.032335 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.158968 2634 scope.go:122] "RemoveContainer" containerID="bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159"
I1229 06:58:52.032371 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.483523 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:58:52.032395 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.165031 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.032414 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.165425 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:58:52.032452 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.166088 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:58:52.032473 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.177787 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:58:52.032495 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.177811 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:58:52.032530 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.178010 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:58:52.032552 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190233 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.032573 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.190259 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:58:52.032608 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190388 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.032631 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190596 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.032655 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.197650 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.032676 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198541 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:58:52.032696 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: I1229 06:53:55.198579 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:58:52.032735 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198854 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:58:52.032819 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.114313 2634 kubelet_node_status.go:474] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"containerRuntimeVersion\\\":\\\"docker://28.5.2\\\"}}}\" for node \"functional-695625\": Patch \"https://192.168.39.121:8441/api/v1/nodes/functional-695625/status?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:58:52.032845 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.650698 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.032864 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.650771 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:58:52.032899 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.651066 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.032919 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.808551 2634 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:58:52.032935 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
I1229 06:58:52.032948 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
I1229 06:58:52.032960 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
I1229 06:58:52.032981 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Consumed 2.468s CPU time, 33.6M memory peak.
I1229 06:58:52.032995 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 systemd[1]: Started kubelet: The Kubernetes Node Agent.
I1229 06:58:52.033012 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045324 6517 server.go:525] "Kubelet version" kubeletVersion="v1.35.0"
I1229 06:58:52.033029 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045661 6517 server.go:527] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:58:52.033042 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045707 6517 watchdog_linux.go:95] "Systemd watchdog is not enabled"
I1229 06:58:52.033062 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045732 6517 watchdog_linux.go:138] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started."
I1229 06:58:52.033080 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.046147 6517 server.go:951] "Client rotation is on, will bootstrap in background"
I1229 06:58:52.033101 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.047668 6517 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem"
I1229 06:58:52.033120 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.050807 6517 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:58:52.033138 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.062385 6517 server.go:1418] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
I1229 06:58:52.033166 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066238 6517 server.go:775] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /"
I1229 06:58:52.033187 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066279 6517 server.go:836] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false
I1229 06:58:52.033206 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066453 6517 container_manager_linux.go:272] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
I1229 06:58:52.033274 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066538 6517 container_manager_linux.go:277] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"functional-695625","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
I1229 06:58:52.033294 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066683 6517 topology_manager.go:143] "Creating topology manager with none policy"
I1229 06:58:52.033309 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066691 6517 container_manager_linux.go:308] "Creating device plugin manager"
I1229 06:58:52.033326 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066715 6517 container_manager_linux.go:317] "Creating Dynamic Resource Allocation (DRA) manager"
I1229 06:58:52.033343 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066977 6517 state_mem.go:41] "Initialized" logger="CPUManager state memory"
I1229 06:58:52.033359 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067273 6517 kubelet.go:482] "Attempting to sync node with API server"
I1229 06:58:52.033378 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067303 6517 kubelet.go:383] "Adding static pod path" path="/etc/kubernetes/manifests"
I1229 06:58:52.033398 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067321 6517 kubelet.go:394] "Adding apiserver pod source"
I1229 06:58:52.033413 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067339 6517 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
I1229 06:58:52.033431 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.071645 6517 kuberuntime_manager.go:294] "Container runtime initialized" containerRuntime="docker" version="28.5.2" apiVersion="v1"
I1229 06:58:52.033453 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072532 6517 kubelet.go:943] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled"
I1229 06:58:52.033476 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072614 6517 kubelet.go:970] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled"
I1229 06:58:52.033492 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.079617 6517 server.go:1257] "Started kubelet"
I1229 06:58:52.033507 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.087576 6517 fs_resource_analyzer.go:69] "Starting FS ResourceAnalyzer"
I1229 06:58:52.033526 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.098777 6517 scope.go:122] "RemoveContainer" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:58:52.033542 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.106373 6517 server.go:182] "Starting to listen" address="0.0.0.0" port=10250
I1229 06:58:52.033559 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.109848 6517 server.go:317] "Adding debug handlers to kubelet server"
I1229 06:58:52.033609 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117444 6517 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
I1229 06:58:52.033625 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117555 6517 server_v1.go:49] "podresources" method="list" useActivePods=true
I1229 06:58:52.033642 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117716 6517 server.go:254] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
I1229 06:58:52.033665 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.118699 6517 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key"
I1229 06:58:52.033681 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119167 6517 volume_manager.go:311] "Starting Kubelet Volume Manager"
I1229 06:58:52.033700 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.119433 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:58:52.033718 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119972 6517 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
I1229 06:58:52.033734 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.120370 6517 reconciler.go:29] "Reconciler: start to sync state"
I1229 06:58:52.033751 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.130418 6517 factory.go:223] Registration of the systemd container factory successfully
I1229 06:58:52.033776 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.131188 6517 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory
I1229 06:58:52.033808 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.141029 6517 factory.go:223] Registration of the containerd container factory successfully
I1229 06:58:52.033826 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183036 6517 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4"
I1229 06:58:52.033840 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183220 6517 status_manager.go:249] "Starting to sync pod status with apiserver"
I1229 06:58:52.033855 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183330 6517 kubelet.go:2501] "Starting kubelet main sync loop"
I1229 06:58:52.033878 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.183444 6517 kubelet.go:2525] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
I1229 06:58:52.033905 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.221428 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:58:52.033937 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.242700 6517 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:58:52.033974 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.243294 6517 image_gc_manager.go:222] "Failed to monitor images" err="get container status: runtime container status: rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:58:52.033993 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269160 6517 cpu_manager.go:225] "Starting" policy="none"
I1229 06:58:52.034010 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269189 6517 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
I1229 06:58:52.034030 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269211 6517 state_mem.go:41] "Initialized" logger="CPUManager state checkpoint.CPUManager state memory"
I1229 06:58:52.034050 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269368 6517 state_mem.go:94] "Updated default CPUSet" logger="CPUManager state checkpoint.CPUManager state memory" cpuSet=""
I1229 06:58:52.034084 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269407 6517 state_mem.go:102] "Updated CPUSet assignments" logger="CPUManager state checkpoint.CPUManager state memory" assignments={}
I1229 06:58:52.034099 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269429 6517 policy_none.go:50] "Start"
I1229 06:58:52.034116 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269440 6517 memory_manager.go:187] "Starting memorymanager" policy="None"
I1229 06:58:52.034134 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269450 6517 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint"
I1229 06:58:52.034152 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269563 6517 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint"
I1229 06:58:52.034167 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.270193 6517 policy_none.go:44] "Start"
I1229 06:58:52.034186 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.280697 6517 manager.go:525] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
I1229 06:58:52.034203 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282209 6517 eviction_manager.go:194] "Eviction manager: starting control loop"
I1229 06:58:52.034224 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282399 6517 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
I1229 06:58:52.034241 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282694 6517 plugin_manager.go:121] "Starting Kubelet Plugin Manager"
I1229 06:58:52.034265 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.285700 6517 eviction_manager.go:272] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime"
I1229 06:58:52.034286 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.286000 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.034308 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.290189 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.034332 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.296210 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.034358 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296213 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:58:52.034380 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296423 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:58:52.034404 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296509 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:58:52.034427 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296522 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:58:52.034450 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296659 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:58:52.034472 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296736 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:58:52.034499 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.298291 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.034521 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.300783 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:58:52.034544 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.307864 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:58:52.034566 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327004 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:58:52.034588 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327039 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:58:52.034611 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.337430 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:58:52.034633 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338584 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:58:52.034655 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338603 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:58:52.034678 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.339318 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.034697 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.384315 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.034724 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.121079 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="200ms"
I1229 06:58:52.034749 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.286789 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.034771 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.288099 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.034819 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.322920 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="400ms"
I1229 06:58:52.034843 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.289381 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.034873 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.724518 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="800ms"
I1229 06:58:52.034936 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.088119 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bc22bb49a default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,LastTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.034963 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.387607 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.034993 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: I1229 06:54:34.589687 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.035018 17440 command_runner.go:130] > Dec 29 06:54:40 functional-695625 kubelet[6517]: E1229 06:54:40.289653 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035049 17440 command_runner.go:130] > Dec 29 06:54:41 functional-695625 kubelet[6517]: E1229 06:54:41.525961 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="1.6s"
I1229 06:58:52.035071 17440 command_runner.go:130] > Dec 29 06:54:50 functional-695625 kubelet[6517]: E1229 06:54:50.290623 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035099 17440 command_runner.go:130] > Dec 29 06:54:53 functional-695625 kubelet[6517]: E1229 06:54:53.127043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="3.2s"
I1229 06:58:52.035126 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.123055 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:58:52.035159 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223407 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-ca-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:58:52.035194 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223452 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-usr-share-ca-certificates\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:58:52.035228 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224254 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-flexvolume-dir\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.035263 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224286 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-k8s-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.035299 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224307 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/ebc0dd45a3bf1e20d1e524935fd6129c-kubeconfig\") pod \"kube-scheduler-functional-695625\" (UID: \"ebc0dd45a3bf1e20d1e524935fd6129c\") " pod="kube-system/kube-scheduler-functional-695625"
I1229 06:58:52.035333 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224328 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-k8s-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:58:52.035368 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224346 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-ca-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.035408 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224360 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-kubeconfig\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.035445 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224377 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-usr-share-ca-certificates\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.035477 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224432 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-certs\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:58:52.035512 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224449 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-data\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:58:52.035534 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.291332 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035563 17440 command_runner.go:130] > Dec 29 06:55:06 functional-695625 kubelet[6517]: E1229 06:55:06.329330 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="6.4s"
I1229 06:58:52.035631 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.090561 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.035658 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.592540 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.035677 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: I1229 06:55:08.994308 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.035699 17440 command_runner.go:130] > Dec 29 06:55:10 functional-695625 kubelet[6517]: E1229 06:55:10.291711 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035720 17440 command_runner.go:130] > Dec 29 06:55:20 functional-695625 kubelet[6517]: E1229 06:55:20.292793 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035749 17440 command_runner.go:130] > Dec 29 06:55:22 functional-695625 kubelet[6517]: E1229 06:55:22.729733 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:58:52.035771 17440 command_runner.go:130] > Dec 29 06:55:30 functional-695625 kubelet[6517]: E1229 06:55:30.293859 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035814 17440 command_runner.go:130] > Dec 29 06:55:39 functional-695625 kubelet[6517]: E1229 06:55:39.730496 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:58:52.035838 17440 command_runner.go:130] > Dec 29 06:55:40 functional-695625 kubelet[6517]: E1229 06:55:40.294978 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035902 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.093022 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc794297 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node functional-695625 status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,LastTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.035927 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.996721 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.035947 17440 command_runner.go:130] > Dec 29 06:55:43 functional-695625 kubelet[6517]: I1229 06:55:43.798535 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.035978 17440 command_runner.go:130] > Dec 29 06:55:50 functional-695625 kubelet[6517]: E1229 06:55:50.295990 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.036010 17440 command_runner.go:130] > Dec 29 06:55:56 functional-695625 kubelet[6517]: E1229 06:55:56.732252 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.036038 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.228455 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:58:52.036061 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.296294 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.036082 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.339811 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:58:52.036102 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.590728 6517 scope.go:122] "RemoveContainer" containerID="d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974"
I1229 06:58:52.036121 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.596576 6517 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:58:52.036141 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.196928 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:58:52.036165 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199564 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036190 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199638 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.036212 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.199656 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.036251 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199813 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.036275 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.211732 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036299 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.212086 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.036323 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226269 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036345 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226760 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.036369 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226846 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036393 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226932 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.036418 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240397 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036441 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.036464 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240759 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036488 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240798 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.036511 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241099 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036536 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241133 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.036561 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241440 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036584 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241482 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.036606 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: I1229 06:56:02.241498 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.036642 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241585 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.036664 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246390 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036687 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246454 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.036711 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246667 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036734 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246717 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.036754 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: I1229 06:56:03.246732 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.036806 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246832 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.036895 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.297136 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.036922 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342375 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036945 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.036973 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: I1229 06:56:10.342477 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.037009 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342670 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.037032 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593708 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037052 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593770 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.037076 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598591 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037098 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598652 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.037122 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606502 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037144 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606600 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.037168 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302101 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037189 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302675 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.037212 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302176 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037235 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302763 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.037254 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: I1229 06:56:11.302780 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.037278 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302307 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037303 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302816 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.037325 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.321043 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.037348 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.321965 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037372 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322030 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.037392 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.322044 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.037424 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322163 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.037449 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323008 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037472 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323148 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.037497 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336021 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037518 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336097 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.037539 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: I1229 06:56:13.336114 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.037574 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336243 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.037604 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.733654 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.037669 17440 command_runner.go:130] > Dec 29 06:56:16 functional-695625 kubelet[6517]: E1229 06:56:16.095560 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc7b9ae7 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node functional-695625 status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,LastTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.037694 17440 command_runner.go:130] > Dec 29 06:56:17 functional-695625 kubelet[6517]: E1229 06:56:17.801052 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.037713 17440 command_runner.go:130] > Dec 29 06:56:19 functional-695625 kubelet[6517]: I1229 06:56:19.403026 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.037734 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.297746 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.037760 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342467 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037784 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342554 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.037816 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: I1229 06:56:20.342589 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.037851 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342829 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.037875 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.385984 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037897 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.386062 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.037917 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: I1229 06:56:20.386078 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.037950 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.386220 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.037981 17440 command_runner.go:130] > Dec 29 06:56:30 functional-695625 kubelet[6517]: E1229 06:56:30.298955 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038011 17440 command_runner.go:130] > Dec 29 06:56:30 functional-695625 kubelet[6517]: E1229 06:56:30.734998 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.038035 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.185639 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038059 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.185732 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038079 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: I1229 06:56:32.185750 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.038102 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493651 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038125 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493733 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038147 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: I1229 06:56:32.493755 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.038182 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493996 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.038203 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: I1229 06:56:33.510294 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.038223 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511464 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038243 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511520 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038260 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: I1229 06:56:33.511535 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.038297 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511684 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.038321 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525404 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038344 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525467 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038365 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: I1229 06:56:34.525482 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.038401 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525663 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.038423 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.300040 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038449 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342011 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038471 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342082 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038491 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: I1229 06:56:40.342099 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.038526 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342223 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.038549 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.567456 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038585 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.567665 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038608 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: I1229 06:56:40.567686 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.038643 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.568152 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.038670 17440 command_runner.go:130] > Dec 29 06:56:47 functional-695625 kubelet[6517]: E1229 06:56:47.736964 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:58:52.038735 17440 command_runner.go:130] > Dec 29 06:56:50 functional-695625 kubelet[6517]: E1229 06:56:50.098168 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.27202431 +0000 UTC m=+0.287773690,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.038758 17440 command_runner.go:130] > Dec 29 06:56:50 functional-695625 kubelet[6517]: E1229 06:56:50.300747 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038785 17440 command_runner.go:130] > Dec 29 06:56:53 functional-695625 kubelet[6517]: E1229 06:56:53.405155 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.038817 17440 command_runner.go:130] > Dec 29 06:56:56 functional-695625 kubelet[6517]: I1229 06:56:56.606176 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.038842 17440 command_runner.go:130] > Dec 29 06:57:00 functional-695625 kubelet[6517]: E1229 06:57:00.301915 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038869 17440 command_runner.go:130] > Dec 29 06:57:00 functional-695625 kubelet[6517]: E1229 06:57:00.330173 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:58:52.038900 17440 command_runner.go:130] > Dec 29 06:57:04 functional-695625 kubelet[6517]: E1229 06:57:04.738681 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.038922 17440 command_runner.go:130] > Dec 29 06:57:10 functional-695625 kubelet[6517]: E1229 06:57:10.302083 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038946 17440 command_runner.go:130] > Dec 29 06:57:20 functional-695625 kubelet[6517]: E1229 06:57:20.302612 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038977 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.185645 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039003 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.185704 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.039034 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.740062 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.039059 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.185952 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039082 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.186017 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.039102 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.186034 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.039126 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.873051 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.039149 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874264 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039171 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874357 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.039191 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.874375 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.039227 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874499 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.039252 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892021 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039275 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892083 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.039295 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: I1229 06:57:23.892098 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.039330 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892218 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.039396 17440 command_runner.go:130] > Dec 29 06:57:24 functional-695625 kubelet[6517]: E1229 06:57:24.100978 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc794297 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node functional-695625 status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,LastTimestamp:2025-12-29 06:54:00.27223373 +0000 UTC m=+0.287983111,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.039419 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: E1229 06:57:30.302837 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.039444 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: E1229 06:57:30.341968 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039468 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: E1229 06:57:30.342033 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.039488 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: I1229 06:57:30.342050 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.039523 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: E1229 06:57:30.342233 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.039550 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: E1229 06:57:30.608375 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.039576 17440 command_runner.go:130] > Dec 29 06:57:32 functional-695625 kubelet[6517]: E1229 06:57:32.186377 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039598 17440 command_runner.go:130] > Dec 29 06:57:32 functional-695625 kubelet[6517]: E1229 06:57:32.186459 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.039675 17440 command_runner.go:130] > Dec 29 06:57:33 functional-695625 kubelet[6517]: E1229 06:57:33.188187 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039700 17440 command_runner.go:130] > Dec 29 06:57:33 functional-695625 kubelet[6517]: E1229 06:57:33.188267 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.039715 17440 command_runner.go:130] > Dec 29 06:57:37 functional-695625 kubelet[6517]: I1229 06:57:37.010219 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.039749 17440 command_runner.go:130] > Dec 29 06:57:38 functional-695625 kubelet[6517]: E1229 06:57:38.741770 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.039773 17440 command_runner.go:130] > Dec 29 06:57:40 functional-695625 kubelet[6517]: E1229 06:57:40.303258 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.039808 17440 command_runner.go:130] > Dec 29 06:57:50 functional-695625 kubelet[6517]: E1229 06:57:50.304120 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.039837 17440 command_runner.go:130] > Dec 29 06:57:55 functional-695625 kubelet[6517]: E1229 06:57:55.743031 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:58:52.039903 17440 command_runner.go:130] > Dec 29 06:57:58 functional-695625 kubelet[6517]: E1229 06:57:58.103052 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc7b9ae7 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node functional-695625 status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,LastTimestamp:2025-12-29 06:54:00.272240811 +0000 UTC m=+0.287990191,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.039929 17440 command_runner.go:130] > Dec 29 06:58:00 functional-695625 kubelet[6517]: E1229 06:58:00.304627 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.039954 17440 command_runner.go:130] > Dec 29 06:58:00 functional-695625 kubelet[6517]: E1229 06:58:00.432518 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:58:52.039991 17440 command_runner.go:130] > Dec 29 06:58:00 functional-695625 kubelet[6517]: E1229 06:58:00.432667 6517 csi_plugin.go:399] Failed to initialize CSINode: error updating CSINode annotation: timed out waiting for the condition; caused by: the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)
I1229 06:58:52.040014 17440 command_runner.go:130] > Dec 29 06:58:10 functional-695625 kubelet[6517]: E1229 06:58:10.305485 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.040037 17440 command_runner.go:130] > Dec 29 06:58:11 functional-695625 kubelet[6517]: E1229 06:58:11.012407 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.040068 17440 command_runner.go:130] > Dec 29 06:58:12 functional-695625 kubelet[6517]: E1229 06:58:12.743824 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.040086 17440 command_runner.go:130] > Dec 29 06:58:18 functional-695625 kubelet[6517]: I1229 06:58:18.014210 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.040107 17440 command_runner.go:130] > Dec 29 06:58:20 functional-695625 kubelet[6517]: E1229 06:58:20.306630 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.040127 17440 command_runner.go:130] > Dec 29 06:58:24 functional-695625 kubelet[6517]: E1229 06:58:24.186554 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040149 17440 command_runner.go:130] > Dec 29 06:58:24 functional-695625 kubelet[6517]: E1229 06:58:24.186719 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.040176 17440 command_runner.go:130] > Dec 29 06:58:29 functional-695625 kubelet[6517]: E1229 06:58:29.745697 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.040195 17440 command_runner.go:130] > Dec 29 06:58:30 functional-695625 kubelet[6517]: E1229 06:58:30.307319 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.040256 17440 command_runner.go:130] > Dec 29 06:58:32 functional-695625 kubelet[6517]: E1229 06:58:32.105206 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.286010652 +0000 UTC m=+0.301760032,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.040279 17440 command_runner.go:130] > Dec 29 06:58:39 functional-695625 kubelet[6517]: E1229 06:58:39.184790 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040300 17440 command_runner.go:130] > Dec 29 06:58:39 functional-695625 kubelet[6517]: E1229 06:58:39.184918 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.040319 17440 command_runner.go:130] > Dec 29 06:58:39 functional-695625 kubelet[6517]: I1229 06:58:39.184949 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.040354 17440 command_runner.go:130] > Dec 29 06:58:39 functional-695625 kubelet[6517]: E1229 06:58:39.185100 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.040377 17440 command_runner.go:130] > Dec 29 06:58:40 functional-695625 kubelet[6517]: E1229 06:58:40.184709 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040397 17440 command_runner.go:130] > Dec 29 06:58:40 functional-695625 kubelet[6517]: E1229 06:58:40.184771 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.040413 17440 command_runner.go:130] > Dec 29 06:58:40 functional-695625 kubelet[6517]: E1229 06:58:40.308010 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.040433 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.185947 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040455 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.186016 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.040477 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: I1229 06:58:43.186033 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.040498 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.503148 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040520 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.503225 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.040538 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: I1229 06:58:43.503241 6517 scope.go:122] "RemoveContainer" containerID="07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817"
I1229 06:58:52.040576 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.503356 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.040596 17440 command_runner.go:130] > Dec 29 06:58:44 functional-695625 kubelet[6517]: I1229 06:58:44.519860 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.040619 17440 command_runner.go:130] > Dec 29 06:58:44 functional-695625 kubelet[6517]: E1229 06:58:44.520985 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040640 17440 command_runner.go:130] > Dec 29 06:58:44 functional-695625 kubelet[6517]: E1229 06:58:44.521063 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.040658 17440 command_runner.go:130] > Dec 29 06:58:44 functional-695625 kubelet[6517]: I1229 06:58:44.521079 6517 scope.go:122] "RemoveContainer" containerID="07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817"
I1229 06:58:52.040692 17440 command_runner.go:130] > Dec 29 06:58:44 functional-695625 kubelet[6517]: E1229 06:58:44.521196 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.040711 17440 command_runner.go:130] > Dec 29 06:58:45 functional-695625 kubelet[6517]: E1229 06:58:45.537487 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040729 17440 command_runner.go:130] > Dec 29 06:58:45 functional-695625 kubelet[6517]: E1229 06:58:45.537563 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.040741 17440 command_runner.go:130] > Dec 29 06:58:45 functional-695625 kubelet[6517]: I1229 06:58:45.537579 6517 scope.go:122] "RemoveContainer" containerID="07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817"
I1229 06:58:52.040764 17440 command_runner.go:130] > Dec 29 06:58:45 functional-695625 kubelet[6517]: E1229 06:58:45.537686 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.040784 17440 command_runner.go:130] > Dec 29 06:58:46 functional-695625 kubelet[6517]: E1229 06:58:46.747043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.040807 17440 command_runner.go:130] > Dec 29 06:58:49 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
I1229 06:58:52.040815 17440 command_runner.go:130] > Dec 29 06:58:49 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
I1229 06:58:52.040821 17440 command_runner.go:130] > Dec 29 06:58:49 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
I1229 06:58:52.040830 17440 command_runner.go:130] > Dec 29 06:58:49 functional-695625 systemd[1]: kubelet.service: Consumed 4.400s CPU time, 30.3M memory peak.
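The kubelet excerpt above ends with systemd stopping the unit, which closes out this gathering pass before the harness moves on to dmesg. To pull the same journal window by hand while the profile is still up, a minimal sketch along the lines of the journalctl calls visible for the Docker step further down (the profile name and the 400-line window are taken from this log; treat the exact flags as an assumption, not the harness's own invocation):

# Assumption: functional-695625 is still running and reachable via `minikube ssh`;
# mirrors the journalctl pattern the harness uses for the Docker/cri-docker units below.
minikube ssh -p functional-695625 -- "sudo journalctl -u kubelet -n 400 --no-pager"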
I1229 06:58:52.093067 17440 logs.go:123] Gathering logs for dmesg ...
I1229 06:58:52.093106 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1229 06:58:52.108863 17440 command_runner.go:130] > [Dec29 06:52] Booted with the nomodeset parameter. Only the system framebuffer will be available
I1229 06:58:52.108898 17440 command_runner.go:130] > [ +0.000007] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
I1229 06:58:52.108912 17440 command_runner.go:130] > [ +0.000088] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
I1229 06:58:52.108925 17440 command_runner.go:130] > [ +0.005634] (rpcbind)[119]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
I1229 06:58:52.108937 17440 command_runner.go:130] > [ +0.202886] crun[405]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set
I1229 06:58:52.108945 17440 command_runner.go:130] > [ +0.971059] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
I1229 06:58:52.108951 17440 command_runner.go:130] > [ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
I1229 06:58:52.108957 17440 command_runner.go:130] > [ +0.000001] NFSD: Unable to initialize client recovery tracking! (-2)
I1229 06:58:52.108962 17440 command_runner.go:130] > [ +0.268875] kauditd_printk_skb: 32 callbacks suppressed
I1229 06:58:52.108971 17440 command_runner.go:130] > [ +0.123569] kauditd_printk_skb: 342 callbacks suppressed
I1229 06:58:52.108975 17440 command_runner.go:130] > [ +0.099711] kauditd_printk_skb: 205 callbacks suppressed
I1229 06:58:52.108980 17440 command_runner.go:130] > [ +0.170782] kauditd_printk_skb: 221 callbacks suppressed
I1229 06:58:52.108992 17440 command_runner.go:130] > [ +0.199839] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:58:52.108997 17440 command_runner.go:130] > [ +0.025660] kauditd_printk_skb: 318 callbacks suppressed
I1229 06:58:52.109006 17440 command_runner.go:130] > [Dec29 06:53] kauditd_printk_skb: 19 callbacks suppressed
I1229 06:58:52.109011 17440 command_runner.go:130] > [ +15.204939] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:58:52.109021 17440 command_runner.go:130] > [ +0.333829] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:58:52.109031 17440 command_runner.go:130] > [ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:58:52.109036 17440 command_runner.go:130] > [ +4.976278] kauditd_printk_skb: 42 callbacks suppressed
I1229 06:58:52.109043 17440 command_runner.go:130] > [ +0.830497] kauditd_printk_skb: 396 callbacks suppressed
I1229 06:58:52.109048 17440 command_runner.go:130] > [ +5.294312] kauditd_printk_skb: 231 callbacks suppressed
I1229 06:58:52.109055 17440 command_runner.go:130] > [Dec29 06:56] kauditd_printk_skb: 36 callbacks suppressed
I1229 06:58:52.109062 17440 command_runner.go:130] > [ +10.952068] kauditd_printk_skb: 66 callbacks suppressed
I1229 06:58:52.109067 17440 command_runner.go:130] > [ +20.880271] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:52.109072 17440 command_runner.go:130] > [Dec29 06:57] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:52.109080 17440 command_runner.go:130] > [Dec29 06:58] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:52.109088 17440 command_runner.go:130] > [ +6.672596] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:52.109931 17440 logs.go:123] Gathering logs for describe nodes ...
I1229 06:58:52.109946 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1229 06:59:52.193646 17440 command_runner.go:130] ! Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
I1229 06:59:52.193695 17440 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (1m0.083736259s)
W1229 06:59:52.193730 17440 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
output:
** stderr **
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
** /stderr **
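The describe-nodes call above spent its full server-side timeout (just over a minute) before failing, because kube-apiserver on the node is still crash-looping, as the kubelet excerpt earlier shows. A hedged way to look at that container directly on the VM, bypassing the unresponsive API (a sketch only; it assumes the docker runtime configured for this profile, and the container ID is simply the last kube-apiserver ID the kubelet mentioned, which may already have been removed):

# Assumptions: docker runtime, VM reachable via `minikube ssh`; the ID below is the
# most recent kube-apiserver container referenced in the kubelet log above and may
# no longer exist by the time this runs.
minikube ssh -p functional-695625 -- "sudo docker ps -a --filter name=kube-apiserver"
minikube ssh -p functional-695625 -- "sudo docker logs --tail 50 07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817"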
I1229 06:59:52.193743 17440 logs.go:123] Gathering logs for Docker ...
I1229 06:59:52.193757 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I1229 06:59:52.211424 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:59:52.211464 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:59:52.211503 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:59:52.211519 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:59:52.211538 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=fatal msg="failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:59:52.211555 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Main process exited, code=exited, status=1/FAILURE
I1229 06:59:52.211569 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Failed with result 'exit-code'.
I1229 06:59:52.211587 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Failed to start CRI Interface for Docker Application Container Engine.
I1229 06:59:52.211601 17440 command_runner.go:130] > Dec 29 06:52:22 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:59:52.211612 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.020462163Z" level=info msg="Starting up"
I1229 06:59:52.211630 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.027928346Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:59:52.211652 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028129610Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:59:52.211672 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028144703Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:59:52.211696 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.043277940Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:59:52.211714 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.068992169Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:59:52.211730 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.093451498Z" level=info msg="Loading containers: start."
I1229 06:59:52.211773 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.245820420Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:59:52.211790 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.354124488Z" level=info msg="Loading containers: done."
I1229 06:59:52.211824 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.369556904Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:59:52.211841 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.370022229Z" level=info msg="Initializing buildkit"
I1229 06:59:52.211855 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: cri-docker.service: Scheduled restart job, restart counter is at 1.
I1229 06:59:52.211871 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:59:52.211884 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.429481151Z" level=info msg="Completed buildkit initialization"
I1229 06:59:52.211899 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437135480Z" level=info msg="Daemon has completed initialization"
I1229 06:59:52.211913 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437228150Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:59:52.211926 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437499736Z" level=info msg="API listen on /run/docker.sock"
I1229 06:59:52.211948 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437545942Z" level=info msg="API listen on [::]:2376"
I1229 06:59:52.211959 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:59:52.211970 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:59:52.211984 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:59:52.212011 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:59:52.212025 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:59:52.212039 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Hairpin mode is set to none"
I1229 06:59:52.212064 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Loaded network plugin cni"
I1229 06:59:52.212079 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:59:52.212093 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:59:52.212108 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:59:52.212125 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:59:52.212139 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:59:52.212152 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:59:52.212172 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250034276Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:59:52.212192 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250065025Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:59:52.212215 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250432086Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:59:52.212237 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250448972Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:59:52.212252 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:59:52.212266 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.004793725Z" level=info msg="Processing signal 'terminated'"
I1229 06:59:52.212285 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006609373Z" level=warning msg="Error while testing if containerd API is ready" error="Canceled: grpc: the client connection is closing"
I1229 06:59:52.212301 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006865498Z" level=info msg="Daemon shutdown complete"
I1229 06:59:52.212316 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:59:52.212331 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:59:52.212341 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:59:52.212357 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.072059214Z" level=info msg="Starting up"
I1229 06:59:52.212372 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079212056Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:59:52.212392 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079317481Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:59:52.212423 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079333267Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:59:52.212444 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.100712562Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:59:52.212461 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.111060819Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:59:52.212477 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.125644752Z" level=info msg="Loading containers: start."
I1229 06:59:52.212512 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.269806698Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:59:52.212529 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.403684326Z" level=info msg="Loading containers: done."
I1229 06:59:52.212547 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419740189Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:59:52.212562 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419840379Z" level=info msg="Initializing buildkit"
I1229 06:59:52.212577 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.440865810Z" level=info msg="Completed buildkit initialization"
I1229 06:59:52.212594 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.450796825Z" level=info msg="Daemon has completed initialization"
I1229 06:59:52.212612 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451233366Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:59:52.212628 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451367379Z" level=info msg="API listen on /run/docker.sock"
I1229 06:59:52.212643 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451393479Z" level=info msg="API listen on [::]:2376"
I1229 06:59:52.212656 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:59:52.212671 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:59:52.212684 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:59:52.212699 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:59:52.212714 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:59:52.212732 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:59:52.212751 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:59:52.212767 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:59:52.212783 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:59:52.212808 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Loaded network plugin cni"
I1229 06:59:52.212827 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:59:52.212844 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:59:52.212864 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:59:52.212881 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:59:52.212899 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:59:52.212916 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:59:52.212932 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:59:52.212949 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.014018901Z" level=info msg="Processing signal 'terminated'"
I1229 06:59:52.212974 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.015980570Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
I1229 06:59:52.212995 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.016658114Z" level=info msg="Daemon shutdown complete"
I1229 06:59:52.213006 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:59:52.213020 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:59:52.213033 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:59:52.213055 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.080172805Z" level=info msg="Starting up"
I1229 06:59:52.213073 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087153730Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:59:52.213094 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087606870Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:59:52.213115 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087791007Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:59:52.213135 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.102104328Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:59:52.213153 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.438808405Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:59:52.213169 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.712758412Z" level=info msg="Loading containers: start."
I1229 06:59:52.213204 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.850108278Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:59:52.213221 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.981771558Z" level=info msg="Loading containers: done."
I1229 06:59:52.213242 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997281457Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:59:52.213258 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997336373Z" level=info msg="Initializing buildkit"
I1229 06:59:52.213275 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.018270012Z" level=info msg="Completed buildkit initialization"
I1229 06:59:52.213291 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.027948102Z" level=info msg="Daemon has completed initialization"
I1229 06:59:52.213308 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028167710Z" level=info msg="API listen on /run/docker.sock"
I1229 06:59:52.213321 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028236879Z" level=info msg="API listen on [::]:2376"
I1229 06:59:52.213334 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028260561Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:59:52.213348 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:59:52.213387 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213414 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213440 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213465 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213486 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:44Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
I1229 06:59:52.213507 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:46Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213528 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213549 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213573 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213595 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.687270343Z" level=info msg="ignoring event" container=67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.213616 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.834054505Z" level=info msg="ignoring event" container=82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.213637 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154228197Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:59:52.213655 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154272599Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:59:52.213675 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154382560Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:59:52.213697 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154394909Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:59:52.213709 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:59:52.213724 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 dockerd[1647]: time="2025-12-29T06:53:25.157393741Z" level=info msg="Processing signal 'terminated'"
I1229 06:59:52.213735 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.159560262Z" level=error msg="Force shutdown daemon"
I1229 06:59:52.213749 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.160035445Z" level=info msg="Daemon shutdown complete"
I1229 06:59:52.213759 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:59:52.213774 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:59:52.213786 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Consumed 2.138s CPU time, 29.7M memory peak.
I1229 06:59:52.213809 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:59:52.213822 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.286623538Z" level=info msg="Starting up"
I1229 06:59:52.213839 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295291170Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:59:52.213856 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295480841Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:59:52.213874 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295496671Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:59:52.213891 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.316635284Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:59:52.213907 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.328807793Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:59:52.213920 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.355375449Z" level=info msg="Loading containers: start."
I1229 06:59:52.213942 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.681285713Z" level=info msg="ignoring event" container=5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.213963 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.755492465Z" level=info msg="ignoring event" container=bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.213985 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.762530714Z" level=info msg="ignoring event" container=64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214006 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.775670003Z" level=info msg="ignoring event" container=0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214028 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.794654459Z" level=info msg="ignoring event" container=8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214055 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.801655844Z" level=info msg="ignoring event" container=548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214078 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828715029Z" level=info msg="ignoring event" container=ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214099 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828769092Z" level=info msg="ignoring event" container=a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214122 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.845767487Z" level=info msg="ignoring event" container=abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214144 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.864343925Z" level=info msg="ignoring event" container=fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214166 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.865774071Z" level=info msg="ignoring event" container=14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214190 17440 command_runner.go:130] > Dec 29 06:53:45 functional-695625 dockerd[4014]: time="2025-12-29T06:53:45.656598076Z" level=info msg="ignoring event" container=bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214211 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.636734672Z" level=info msg="ignoring event" container=fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214242 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.811417108Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:59:52.214258 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.854503584Z" level=info msg="Removing stale sandbox" cid=a123d63a8edb isRestore=false sid=bee98e10184c
I1229 06:59:52.214283 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.857444846Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 155f23c2cd353f99747cbbed5071c374427d34acfe358ab2da9489f0ecc6dd58 20989221f5da3e18159e9875a44d6ffa354887adacc49a282cdee70b58f0dd06], retrying...."
I1229 06:59:52.214298 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.873316567Z" level=info msg="Removing stale sandbox" cid=0af491ef7c2f isRestore=false sid=043bbf7592a3
I1229 06:59:52.214323 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.875334227Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 abd499ef79857402bb9465b07e26fb0f75693045ea6a45283c4a1a4b13da7c92], retrying...."
I1229 06:59:52.214341 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.887452986Z" level=info msg="Removing stale sandbox" cid=ad82b94f7629 isRestore=false sid=4ae81a2c92d8
I1229 06:59:52.214365 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.890633879Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 ccabc3ec6c0d337909f3a6bfccd1999d5ddec500f785c46c7c1173bb9f142a4d], retrying...."
I1229 06:59:52.214380 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.896180450Z" level=info msg="Removing stale sandbox" cid=5024b03252e3 isRestore=false sid=4f7be10df8fc
I1229 06:59:52.214405 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.898438145Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 0e272d35a10e432b889f2a3f6f048225031acf42b0444ba6b0cc9339f3cb374f], retrying...."
I1229 06:59:52.214421 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.903187461Z" level=info msg="Removing stale sandbox" cid=64853b50a6c5 isRestore=false sid=826a3dc204ef
I1229 06:59:52.214447 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.905271147Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 67cd3d4378e987242bd34247eace592097542682b6c3f23a5a478422e9bfbb3b], retrying...."
I1229 06:59:52.214464 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.910152629Z" level=info msg="Removing stale sandbox" cid=548561c7ada8 isRestore=false sid=94281ce70a77
I1229 06:59:52.214489 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.911967707Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 b513626d489ab85e12802c06e57f2ac0b0298434467c73d2846152ca9481eeae], retrying...."
I1229 06:59:52.214506 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.917235829Z" level=info msg="Removing stale sandbox" cid=fe7b5da2f7fb isRestore=false sid=b5e6c523a381
I1229 06:59:52.214531 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.919265802Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 bef3c0f56e910ab0a1a698f2eb08c97229abee2b90bf53ab9119cbdba3cb6eaa], retrying...."
I1229 06:59:52.214553 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022090385Z" level=warning msg="error locating sandbox id 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3: sandbox 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3 not found"
I1229 06:59:52.214576 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022147638Z" level=warning msg="error locating sandbox id 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48: sandbox 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48 not found"
I1229 06:59:52.214600 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022162233Z" level=warning msg="error locating sandbox id 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6: sandbox 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6 not found"
I1229 06:59:52.214623 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022177741Z" level=warning msg="error locating sandbox id 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e: sandbox 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e not found"
I1229 06:59:52.214646 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022193375Z" level=warning msg="error locating sandbox id bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc: sandbox bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc not found"
I1229 06:59:52.214668 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022209936Z" level=warning msg="error locating sandbox id 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2: sandbox 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2 not found"
I1229 06:59:52.214690 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022222477Z" level=warning msg="error locating sandbox id b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0: sandbox b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0 not found"
I1229 06:59:52.214703 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022440032Z" level=info msg="Loading containers: done."
I1229 06:59:52.214721 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037242165Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:59:52.214735 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037335060Z" level=info msg="Initializing buildkit"
I1229 06:59:52.214748 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.057350643Z" level=info msg="Completed buildkit initialization"
I1229 06:59:52.214762 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.066932687Z" level=info msg="Daemon has completed initialization"
I1229 06:59:52.214775 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067089967Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:59:52.214788 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067190842Z" level=info msg="API listen on /run/docker.sock"
I1229 06:59:52.215123 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067284257Z" level=info msg="API listen on [::]:2376"
I1229 06:59:52.215148 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:59:52.215180 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:53:48Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780\""
I1229 06:59:52.215194 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:59:52.215210 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:59:52.215222 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:59:52.215233 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Consumed 1.284s CPU time, 18.5M memory peak.
I1229 06:59:52.215247 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:59:52.215265 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:59:52.215283 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:59:52.215299 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:59:52.215312 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:59:52.215324 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Loaded network plugin cni"
I1229 06:59:52.215340 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:59:52.215355 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:59:52.215372 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:59:52.215389 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:59:52.215401 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:59:52.215409 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:59:52.215430 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215454 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215478 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215500 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215517 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 dockerd[4014]: time="2025-12-29T06:53:50.654005689Z" level=info msg="ignoring event" container=fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215532 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215549 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": unexpected command output nsenter: cannot open /proc/5603/ns/net: No such file or directory\n with error: exit status 1"
I1229 06:59:52.215565 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.781948864Z" level=info msg="ignoring event" container=17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215578 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.786486841Z" level=info msg="ignoring event" container=1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215593 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.807329963Z" level=info msg="ignoring event" container=b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215606 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.833907949Z" level=info msg="ignoring event" container=6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215622 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.842344727Z" level=info msg="ignoring event" container=a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215643 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.846952655Z" level=info msg="ignoring event" container=4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215667 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.855675748Z" level=info msg="ignoring event" container=98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215688 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 dockerd[4014]: time="2025-12-29T06:53:52.089998903Z" level=info msg="ignoring event" container=a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215712 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a96e34d38f8c1eccbbdf73d99dbbbe353acea505d84b69f0fdd4e54cb811123/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215738 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d3819cc8ab802e5145e47325398f1da69b88a241482842040339b6b0d609a176/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215762 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fefef7c5591ea14974a99c19d99f86c4404e25de1b446a0cd0f0bcfffa63a991/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215839 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784\""
I1229 06:59:52.215868 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a7b1e961ded554edec9d882d7f1f6093e8446ab1020c81b638de16b76de139b0/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215888 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.150956960Z" level=error msg="collecting stats for container /k8s_etcd_etcd-functional-695625_kube-system_8da5c6c8980da2ca920a502b6f312384_1: invalid id: id is empty"
I1229 06:59:52.215912 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: 2025/12/29 06:53:58 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request.(*RespWriterWrapper).writeHeader (resp_writer_wrapper.go:83)
I1229 06:59:52.215937 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.741840545Z" level=info msg="ignoring event" container=d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215959 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:59Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/173054afc2f39262ebb1466d26d5d6144bb8704054c087da601130a01d9caaf1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215979 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: W1229 06:53:59.025412 4884 logging.go:59] [core] [Server #1] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
I1229 06:59:52.216007 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:59:52.216027 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:59:52.216051 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:59:52.216067 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:59:52.216084 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 dockerd[4014]: time="2025-12-29T06:56:00.626282205Z" level=info msg="ignoring event" container=78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216097 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 dockerd[4014]: time="2025-12-29T06:56:11.553142622Z" level=info msg="ignoring event" container=18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216112 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 dockerd[4014]: time="2025-12-29T06:56:32.448119389Z" level=info msg="ignoring event" container=0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216128 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="error getting RW layer size for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b': Error response from daemon: No such container: fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:59:52.216141 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b'"
I1229 06:59:52.216157 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:59:52.216171 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:59:52.216195 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 dockerd[4014]: time="2025-12-29T06:57:22.465508622Z" level=info msg="ignoring event" container=b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216222 17440 command_runner.go:130] > Dec 29 06:57:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:57:58Z" level=error msg="error getting RW layer size for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b': Error response from daemon: No such container: fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:59:52.216243 17440 command_runner.go:130] > Dec 29 06:57:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:57:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b'"
I1229 06:59:52.216263 17440 command_runner.go:130] > Dec 29 06:57:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:57:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:59:52.216276 17440 command_runner.go:130] > Dec 29 06:57:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:57:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:59:52.216289 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 dockerd[4014]: time="2025-12-29T06:58:43.458641345Z" level=info msg="ignoring event" container=07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216304 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.011072219Z" level=info msg="ignoring event" container=173054afc2f39262ebb1466d26d5d6144bb8704054c087da601130a01d9caaf1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216318 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.102126666Z" level=info msg="ignoring event" container=6b7711ee25a2df71f8c7d296f7186875ebd6ab978a71d33f177de0cc3055645b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216331 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.266578298Z" level=info msg="ignoring event" container=a7b1e961ded554edec9d882d7f1f6093e8446ab1020c81b638de16b76de139b0 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216346 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.365376654Z" level=info msg="ignoring event" container=fefef7c5591ea14974a99c19d99f86c4404e25de1b446a0cd0f0bcfffa63a991 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216365 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.452640794Z" level=info msg="ignoring event" container=4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216380 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.557330204Z" level=info msg="ignoring event" container=d3819cc8ab802e5145e47325398f1da69b88a241482842040339b6b0d609a176 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216392 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.666151542Z" level=info msg="ignoring event" container=0a96e34d38f8c1eccbbdf73d99dbbbe353acea505d84b69f0fdd4e54cb811123 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216409 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.751481082Z" level=info msg="ignoring event" container=f48fc04e347519b276e239ee9a6b0b8e093862313e46174a1815efae670eec9b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216427 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID '4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535': Error response from daemon: No such container: 4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535"
I1229 06:59:52.216440 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535'"
I1229 06:59:52.216455 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b': Error response from daemon: No such container: fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:59:52.216467 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b'"
I1229 06:59:52.216484 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be': Error response from daemon: No such container: bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be"
I1229 06:59:52.216495 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be'"
I1229 06:59:52.216512 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e': Error response from daemon: No such container: a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e"
I1229 06:59:52.216525 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e'"
I1229 06:59:52.216542 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'd81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974': Error response from daemon: No such container: d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974"
I1229 06:59:52.216554 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'd81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974'"
I1229 06:59:52.216568 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID '6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00': Error response from daemon: No such container: 6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:59:52.216582 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00'"
I1229 06:59:52.216596 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:59:52.216611 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:59:52.216628 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID '8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d': Error response from daemon: No such container: 8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d"
I1229 06:59:52.216642 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d'"
I1229 06:59:52.216660 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID '17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13': Error response from daemon: No such container: 17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:59:52.216673 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13'"
I1229 06:59:52.238629 17440 logs.go:123] Gathering logs for container status ...
I1229 06:59:52.238668 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1229 06:59:52.287732 17440 command_runner.go:130] > CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
W1229 06:59:52.290016 17440 out.go:434] Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
stderr:
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Some fatal errors occurred:
[ERROR Port-8441]: Port 8441 is in use
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
error: error execution phase preflight: preflight checks failed
To see the stack trace of this error execute with --v=5 or higher
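Note: the preflight failure above means kubeadm found something already bound to the apiserver port (8441) inside the guest, typically an apiserver process left over from the earlier start of this profile. As a hedged sketch (not part of the captured test output, and assuming the usual tools are present in the guest image), the listener can be identified by running inside the node, e.g. via "minikube ssh -p functional-695625":

  # show which process is listening on the apiserver port
  sudo ss -ltnp 'sport = :8441'

If ss is unavailable, "sudo netstat -ltnp | grep 8441" or "sudo lsof -i :8441" are common alternatives.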
W1229 06:59:52.290080 17440 out.go:285] *
W1229 06:59:52.290145 17440 out.go:285] X Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
stderr:
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Some fatal errors occurred:
[ERROR Port-8441]: Port 8441 is in use
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
error: error execution phase preflight: preflight checks failed
To see the stack trace of this error execute with --v=5 or higher
W1229 06:59:52.290156 17440 out.go:285] *
W1229 06:59:52.290452 17440 out.go:308] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
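The box above asks for a log bundle when reporting the failure; for this run the suggested command, using the binary and profile from this job, would be:
  out/minikube-linux-amd64 -p functional-695625 logs --file=logs.txt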
I1229 06:59:52.293734 17440 out.go:203]
W1229 06:59:52.295449 17440 out.go:285] X Exiting due to GUEST_PORT_IN_USE: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
stderr:
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Some fatal errors occurred:
[ERROR Port-8441]: Port 8441 is in use
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
error: error execution phase preflight: preflight checks failed
To see the stack trace of this error execute with --v=5 or higher
W1229 06:59:52.295482 17440 out.go:285] * Suggestion: kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p<port> to find the process and kill it
W1229 06:59:52.295500 17440 out.go:285] * Related issue: https://github.com/kubernetes/minikube/issues/5484
I1229 06:59:52.296904 17440 out.go:203]
** /stderr **
functional_test.go:676: failed to soft start minikube. args "out/minikube-linux-amd64 start -p functional-695625 --alsologtostderr -v=8": exit status 81
functional_test.go:678: soft start took 6m30.726594975s for "functional-695625" cluster.
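The failure reduces to kubeadm's preflight finding TCP 8441 (this profile's --apiserver-port) already bound inside the guest, most likely by the apiserver still running from the first start. A hand-run check to identify the listener, assuming ss or lsof is available in the guest image, would be:
  out/minikube-linux-amd64 -p functional-695625 ssh "sudo ss -ltnp | grep ':8441' || sudo lsof -iTCP:8441 -sTCP:LISTEN"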
I1229 06:59:52.921439 13486 config.go:182] Loaded profile config "functional-695625": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:224: ======> post-mortem[TestFunctional/serial/SoftStart]: network settings <======
helpers_test.go:231: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p functional-695625 -n functional-695625
helpers_test.go:248: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p functional-695625 -n functional-695625: exit status 2 (15.838291196s)
-- stdout --
Running
-- /stdout --
helpers_test.go:248: status error: exit status 2 (may be ok)
helpers_test.go:253: <<< TestFunctional/serial/SoftStart FAILED: start of post-mortem logs <<<
helpers_test.go:254: ======> post-mortem[TestFunctional/serial/SoftStart]: minikube logs <======
helpers_test.go:256: (dbg) Run: out/minikube-linux-amd64 -p functional-695625 logs -n 25
helpers_test.go:256: (dbg) Done: out/minikube-linux-amd64 -p functional-695625 logs -n 25: (1m1.113583499s)
helpers_test.go:261: TestFunctional/serial/SoftStart logs:
-- stdout --
==> Audit <==
┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ addons-909246 ssh cat /opt/local-path-provisioner/pvc-60e48b23-4f43-4f44-8576-c979927d0800_default_test-pvc/file1 │ addons-909246 │ jenkins │ v1.37.0 │ 29 Dec 25 06:50 UTC │ 29 Dec 25 06:50 UTC │
│ addons │ addons-909246 addons disable storage-provisioner-rancher --alsologtostderr -v=1 │ addons-909246 │ jenkins │ v1.37.0 │ 29 Dec 25 06:50 UTC │ 29 Dec 25 06:50 UTC │
│ addons │ addons-909246 addons disable volumesnapshots --alsologtostderr -v=1 │ addons-909246 │ jenkins │ v1.37.0 │ 29 Dec 25 06:50 UTC │ 29 Dec 25 06:50 UTC │
│ addons │ addons-909246 addons disable csi-hostpath-driver --alsologtostderr -v=1 │ addons-909246 │ jenkins │ v1.37.0 │ 29 Dec 25 06:50 UTC │ 29 Dec 25 06:51 UTC │
│ stop │ -p addons-909246 │ addons-909246 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:51 UTC │
│ addons │ enable dashboard -p addons-909246 │ addons-909246 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:51 UTC │
│ addons │ disable dashboard -p addons-909246 │ addons-909246 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:51 UTC │
│ addons │ disable gvisor -p addons-909246 │ addons-909246 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:51 UTC │
│ delete │ -p addons-909246 │ addons-909246 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:51 UTC │
│ start │ -p nospam-039815 -n=1 --memory=3072 --wait=false --log_dir=/tmp/nospam-039815 --driver=kvm2 │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:51 UTC │
│ start │ nospam-039815 --log_dir /tmp/nospam-039815 start --dry-run │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ │
│ start │ nospam-039815 --log_dir /tmp/nospam-039815 start --dry-run │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ │
│ start │ nospam-039815 --log_dir /tmp/nospam-039815 start --dry-run │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ │
│ pause │ nospam-039815 --log_dir /tmp/nospam-039815 pause │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:51 UTC │
│ pause │ nospam-039815 --log_dir /tmp/nospam-039815 pause │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:51 UTC │
│ pause │ nospam-039815 --log_dir /tmp/nospam-039815 pause │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:51 UTC │
│ unpause │ nospam-039815 --log_dir /tmp/nospam-039815 unpause │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:51 UTC │
│ unpause │ nospam-039815 --log_dir /tmp/nospam-039815 unpause │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:51 UTC │ 29 Dec 25 06:52 UTC │
│ unpause │ nospam-039815 --log_dir /tmp/nospam-039815 unpause │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:52 UTC │ 29 Dec 25 06:52 UTC │
│ stop │ nospam-039815 --log_dir /tmp/nospam-039815 stop │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:52 UTC │ 29 Dec 25 06:52 UTC │
│ stop │ nospam-039815 --log_dir /tmp/nospam-039815 stop │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:52 UTC │ 29 Dec 25 06:52 UTC │
│ stop │ nospam-039815 --log_dir /tmp/nospam-039815 stop │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:52 UTC │ 29 Dec 25 06:52 UTC │
│ delete │ -p nospam-039815 │ nospam-039815 │ jenkins │ v1.37.0 │ 29 Dec 25 06:52 UTC │ 29 Dec 25 06:52 UTC │
│ start │ -p functional-695625 --memory=4096 --apiserver-port=8441 --wait=all --driver=kvm2 │ functional-695625 │ jenkins │ v1.37.0 │ 29 Dec 25 06:52 UTC │ 29 Dec 25 06:53 UTC │
│ start │ -p functional-695625 --alsologtostderr -v=8 │ functional-695625 │ jenkins │ v1.37.0 │ 29 Dec 25 06:53 UTC │ │
└─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/12/29 06:53:22
Running on machine: ubuntu-20-agent-7
Binary: Built with gc go1.25.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1229 06:53:22.250786 17440 out.go:360] Setting OutFile to fd 1 ...
I1229 06:53:22.251073 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1229 06:53:22.251082 17440 out.go:374] Setting ErrFile to fd 2...
I1229 06:53:22.251087 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1229 06:53:22.251322 17440 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22353-9552/.minikube/bin
I1229 06:53:22.251807 17440 out.go:368] Setting JSON to false
I1229 06:53:22.252599 17440 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-7","uptime":2152,"bootTime":1766989050,"procs":176,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1045-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1229 06:53:22.252669 17440 start.go:143] virtualization: kvm guest
I1229 06:53:22.254996 17440 out.go:179] * [functional-695625] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1229 06:53:22.256543 17440 notify.go:221] Checking for updates...
I1229 06:53:22.256551 17440 out.go:179] - MINIKUBE_LOCATION=22353
I1229 06:53:22.258115 17440 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1229 06:53:22.259464 17440 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22353-9552/kubeconfig
I1229 06:53:22.260823 17440 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22353-9552/.minikube
I1229 06:53:22.262461 17440 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1229 06:53:22.263830 17440 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1229 06:53:22.265499 17440 config.go:182] Loaded profile config "functional-695625": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0
I1229 06:53:22.265604 17440 driver.go:422] Setting default libvirt URI to qemu:///system
I1229 06:53:22.301877 17440 out.go:179] * Using the kvm2 driver based on existing profile
I1229 06:53:22.303062 17440 start.go:309] selected driver: kvm2
I1229 06:53:22.303099 17440 start.go:928] validating driver "kvm2" against &{Name:functional-695625 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22353/minikube-v1.37.0-1766979747-22353-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766979815-22353@sha256:20dad5895b49b986a1253c0faab60865204843ac97fd3a6e6210da5896244409 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:functional-695625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.121 Port:8441 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1229 06:53:22.303255 17440 start.go:939] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1229 06:53:22.304469 17440 cni.go:84] Creating CNI manager for ""
I1229 06:53:22.304541 17440 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1229 06:53:22.304607 17440 start.go:353] cluster config:
{Name:functional-695625 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22353/minikube-v1.37.0-1766979747-22353-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766979815-22353@sha256:20dad5895b49b986a1253c0faab60865204843ac97fd3a6e6210da5896244409 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:functional-695625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.121 Port:8441 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1229 06:53:22.304716 17440 iso.go:125] acquiring lock: {Name:mk2adf09d18eb25f1d98559b1ab4af84fc4e9a54 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1229 06:53:22.306617 17440 out.go:179] * Starting "functional-695625" primary control-plane node in "functional-695625" cluster
I1229 06:53:22.307989 17440 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I1229 06:53:22.308028 17440 preload.go:203] Found local preload: /home/jenkins/minikube-integration/22353-9552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-amd64.tar.lz4
I1229 06:53:22.308037 17440 cache.go:65] Caching tarball of preloaded images
I1229 06:53:22.308172 17440 preload.go:251] Found /home/jenkins/minikube-integration/22353-9552/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1229 06:53:22.308185 17440 cache.go:68] Finished verifying existence of preloaded tar for v1.35.0 on docker
I1229 06:53:22.308288 17440 profile.go:143] Saving config to /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/config.json ...
I1229 06:53:22.308499 17440 start.go:360] acquireMachinesLock for functional-695625: {Name:mk15f2078da2c2dd9529f5e9a0dd3e4cc97196c6 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1229 06:53:22.308543 17440 start.go:364] duration metric: took 25.28µs to acquireMachinesLock for "functional-695625"
I1229 06:53:22.308555 17440 start.go:96] Skipping create...Using existing machine configuration
I1229 06:53:22.308560 17440 fix.go:54] fixHost starting:
I1229 06:53:22.310738 17440 fix.go:112] recreateIfNeeded on functional-695625: state=Running err=<nil>
W1229 06:53:22.310765 17440 fix.go:138] unexpected machine state, will restart: <nil>
I1229 06:53:22.313927 17440 out.go:252] * Updating the running kvm2 "functional-695625" VM ...
I1229 06:53:22.313960 17440 machine.go:94] provisionDockerMachine start ...
I1229 06:53:22.317184 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.317690 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.317748 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.317941 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:22.318146 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:22.318156 17440 main.go:144] libmachine: About to run SSH command:
hostname
I1229 06:53:22.424049 17440 main.go:144] libmachine: SSH cmd err, output: <nil>: functional-695625
I1229 06:53:22.424102 17440 buildroot.go:166] provisioning hostname "functional-695625"
I1229 06:53:22.427148 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.427685 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.427715 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.427957 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:22.428261 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:22.428280 17440 main.go:144] libmachine: About to run SSH command:
sudo hostname functional-695625 && echo "functional-695625" | sudo tee /etc/hostname
I1229 06:53:22.552563 17440 main.go:144] libmachine: SSH cmd err, output: <nil>: functional-695625
I1229 06:53:22.555422 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.555807 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.555834 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.556061 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:22.556278 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:22.556302 17440 main.go:144] libmachine: About to run SSH command:
if ! grep -xq '.*\sfunctional-695625' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-695625/g' /etc/hosts;
else
echo '127.0.1.1 functional-695625' | sudo tee -a /etc/hosts;
fi
fi
I1229 06:53:22.661438 17440 main.go:144] libmachine: SSH cmd err, output: <nil>:
I1229 06:53:22.661470 17440 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/22353-9552/.minikube CaCertPath:/home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22353-9552/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22353-9552/.minikube}
I1229 06:53:22.661505 17440 buildroot.go:174] setting up certificates
I1229 06:53:22.661529 17440 provision.go:84] configureAuth start
I1229 06:53:22.664985 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.665439 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.665459 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.667758 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.668124 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.668145 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.668257 17440 provision.go:143] copyHostCerts
I1229 06:53:22.668280 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/22353-9552/.minikube/ca.pem
I1229 06:53:22.668308 17440 exec_runner.go:144] found /home/jenkins/minikube-integration/22353-9552/.minikube/ca.pem, removing ...
I1229 06:53:22.668317 17440 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22353-9552/.minikube/ca.pem
I1229 06:53:22.668383 17440 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22353-9552/.minikube/ca.pem (1082 bytes)
I1229 06:53:22.668476 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/22353-9552/.minikube/cert.pem
I1229 06:53:22.668505 17440 exec_runner.go:144] found /home/jenkins/minikube-integration/22353-9552/.minikube/cert.pem, removing ...
I1229 06:53:22.668512 17440 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22353-9552/.minikube/cert.pem
I1229 06:53:22.668541 17440 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22353-9552/.minikube/cert.pem (1123 bytes)
I1229 06:53:22.668582 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/22353-9552/.minikube/key.pem
I1229 06:53:22.668598 17440 exec_runner.go:144] found /home/jenkins/minikube-integration/22353-9552/.minikube/key.pem, removing ...
I1229 06:53:22.668603 17440 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22353-9552/.minikube/key.pem
I1229 06:53:22.668632 17440 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22353-9552/.minikube/key.pem (1675 bytes)
I1229 06:53:22.668676 17440 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22353-9552/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca-key.pem org=jenkins.functional-695625 san=[127.0.0.1 192.168.39.121 functional-695625 localhost minikube]
I1229 06:53:22.746489 17440 provision.go:177] copyRemoteCerts
I1229 06:53:22.746545 17440 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1229 06:53:22.749128 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.749596 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.749616 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.749757 17440 sshutil.go:53] new ssh client: &{IP:192.168.39.121 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/functional-695625/id_rsa Username:docker}
I1229 06:53:22.836885 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I1229 06:53:22.836959 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1229 06:53:22.872390 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I1229 06:53:22.872481 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1229 06:53:22.908829 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/machines/server.pem -> /etc/docker/server.pem
I1229 06:53:22.908896 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1229 06:53:22.941014 17440 provision.go:87] duration metric: took 279.457536ms to configureAuth
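configureAuth regenerated the server certificate with the SANs shown above (san=[127.0.0.1 192.168.39.121 functional-695625 localhost minikube]) and copied it to /etc/docker/server.pem. A manual way to confirm the SANs on the installed certificate, assuming openssl is present in the guest image, would be:
  out/minikube-linux-amd64 -p functional-695625 ssh "sudo openssl x509 -in /etc/docker/server.pem -noout -text | grep -A1 'Subject Alternative Name'"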
I1229 06:53:22.941053 17440 buildroot.go:189] setting minikube options for container-runtime
I1229 06:53:22.941277 17440 config.go:182] Loaded profile config "functional-695625": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0
I1229 06:53:22.944375 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.944857 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:22.944916 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:22.945128 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:22.945387 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:22.945402 17440 main.go:144] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1229 06:53:23.052106 17440 main.go:144] libmachine: SSH cmd err, output: <nil>: tmpfs
I1229 06:53:23.052136 17440 buildroot.go:70] root file system type: tmpfs
I1229 06:53:23.052304 17440 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1229 06:53:23.055887 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.056416 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.056446 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.056629 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:23.056893 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:23.056961 17440 main.go:144] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1229 06:53:23.183096 17440 main.go:144] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1229 06:53:23.186465 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.186943 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.187006 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.187227 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:23.187475 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:23.187494 17440 main.go:144] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1229 06:53:23.306011 17440 main.go:144] libmachine: SSH cmd err, output: <nil>:
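The SSH command above is a compare-then-swap: the freshly rendered unit replaces /lib/systemd/system/docker.service, and Docker is reloaded, enabled and restarted, only when the two files differ; the empty output suggests they matched, so nothing was restarted at this step. Written out, the same pattern is:
  new=/lib/systemd/system/docker.service.new
  if ! sudo diff -u /lib/systemd/system/docker.service "$new"; then
    sudo mv "$new" /lib/systemd/system/docker.service
    sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker
  fi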
I1229 06:53:23.306077 17440 machine.go:97] duration metric: took 992.109676ms to provisionDockerMachine
I1229 06:53:23.306099 17440 start.go:293] postStartSetup for "functional-695625" (driver="kvm2")
I1229 06:53:23.306114 17440 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1229 06:53:23.306201 17440 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1229 06:53:23.309537 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.309944 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.309967 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.310122 17440 sshutil.go:53] new ssh client: &{IP:192.168.39.121 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/functional-695625/id_rsa Username:docker}
I1229 06:53:23.393657 17440 ssh_runner.go:195] Run: cat /etc/os-release
I1229 06:53:23.398689 17440 command_runner.go:130] > NAME=Buildroot
I1229 06:53:23.398723 17440 command_runner.go:130] > VERSION=2025.02
I1229 06:53:23.398731 17440 command_runner.go:130] > ID=buildroot
I1229 06:53:23.398737 17440 command_runner.go:130] > VERSION_ID=2025.02
I1229 06:53:23.398745 17440 command_runner.go:130] > PRETTY_NAME="Buildroot 2025.02"
I1229 06:53:23.398791 17440 info.go:137] Remote host: Buildroot 2025.02
I1229 06:53:23.398821 17440 filesync.go:126] Scanning /home/jenkins/minikube-integration/22353-9552/.minikube/addons for local assets ...
I1229 06:53:23.398897 17440 filesync.go:126] Scanning /home/jenkins/minikube-integration/22353-9552/.minikube/files for local assets ...
I1229 06:53:23.398981 17440 filesync.go:149] local asset: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem -> 134862.pem in /etc/ssl/certs
I1229 06:53:23.398993 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem -> /etc/ssl/certs/134862.pem
I1229 06:53:23.399068 17440 filesync.go:149] local asset: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/test/nested/copy/13486/hosts -> hosts in /etc/test/nested/copy/13486
I1229 06:53:23.399075 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/test/nested/copy/13486/hosts -> /etc/test/nested/copy/13486/hosts
I1229 06:53:23.399114 17440 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/13486
I1229 06:53:23.412045 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem --> /etc/ssl/certs/134862.pem (1708 bytes)
I1229 06:53:23.445238 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/test/nested/copy/13486/hosts --> /etc/test/nested/copy/13486/hosts (40 bytes)
I1229 06:53:23.479048 17440 start.go:296] duration metric: took 172.930561ms for postStartSetup
I1229 06:53:23.479099 17440 fix.go:56] duration metric: took 1.170538464s for fixHost
I1229 06:53:23.482307 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.482761 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.482808 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.483049 17440 main.go:144] libmachine: Using SSH client type: native
I1229 06:53:23.483313 17440 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e300] 0x850fa0 <nil> [] 0s} 192.168.39.121 22 <nil> <nil>}
I1229 06:53:23.483327 17440 main.go:144] libmachine: About to run SSH command:
date +%s.%N
I1229 06:53:23.586553 17440 main.go:144] libmachine: SSH cmd err, output: <nil>: 1766991203.580410695
I1229 06:53:23.586572 17440 fix.go:216] guest clock: 1766991203.580410695
I1229 06:53:23.586579 17440 fix.go:229] Guest: 2025-12-29 06:53:23.580410695 +0000 UTC Remote: 2025-12-29 06:53:23.479103806 +0000 UTC m=+1.278853461 (delta=101.306889ms)
I1229 06:53:23.586594 17440 fix.go:200] guest clock delta is within tolerance: 101.306889ms
I1229 06:53:23.586598 17440 start.go:83] releasing machines lock for "functional-695625", held for 1.278049275s
I1229 06:53:23.590004 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.590438 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.590463 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.591074 17440 ssh_runner.go:195] Run: cat /version.json
I1229 06:53:23.591186 17440 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1229 06:53:23.594362 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.594454 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.594831 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.594868 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.594954 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:23.595021 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:23.595083 17440 sshutil.go:53] new ssh client: &{IP:192.168.39.121 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/functional-695625/id_rsa Username:docker}
I1229 06:53:23.595278 17440 sshutil.go:53] new ssh client: &{IP:192.168.39.121 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22353-9552/.minikube/machines/functional-695625/id_rsa Username:docker}
I1229 06:53:23.692873 17440 command_runner.go:130] > <a href="https://github.com/kubernetes/registry.k8s.io">Temporary Redirect</a>.
I1229 06:53:23.692948 17440 command_runner.go:130] > {"iso_version": "v1.37.0-1766979747-22353", "kicbase_version": "v0.0.48-1766884053-22351", "minikube_version": "v1.37.0", "commit": "f5189b2bdbb6990e595e25e06a017f8901d29fa8"}
I1229 06:53:23.693063 17440 ssh_runner.go:195] Run: systemctl --version
I1229 06:53:23.700357 17440 command_runner.go:130] > systemd 256 (256.7)
I1229 06:53:23.700393 17440 command_runner.go:130] > -PAM -AUDIT -SELINUX -APPARMOR -IMA -SMACK +SECCOMP +GCRYPT -GNUTLS +OPENSSL +ACL +BLKID +CURL -ELFUTILS -FIDO2 -IDN2 -IDN +IPTC +KMOD -LIBCRYPTSETUP -LIBCRYPTSETUP_PLUGINS +LIBFDISK +PCRE2 -PWQUALITY -P11KIT -QRENCODE -TPM2 -BZIP2 +LZ4 +XZ +ZLIB -ZSTD -BPF_FRAMEWORK -XKBCOMMON -UTMP -SYSVINIT -LIBARCHIVE
I1229 06:53:23.700501 17440 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I1229 06:53:23.707230 17440 command_runner.go:130] ! stat: cannot statx '/etc/cni/net.d/*loopback.conf*': No such file or directory
W1229 06:53:23.707369 17440 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1229 06:53:23.707433 17440 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1229 06:53:23.719189 17440 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
I1229 06:53:23.719220 17440 start.go:496] detecting cgroup driver to use...
I1229 06:53:23.719246 17440 start.go:519] Kubernetes 1.35.0+ detected, using "systemd" cgroup driver
I1229 06:53:23.719351 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1229 06:53:23.744860 17440 command_runner.go:130] > runtime-endpoint: unix:///run/containerd/containerd.sock
I1229 06:53:23.744940 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1229 06:53:23.758548 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1229 06:53:23.773051 17440 containerd.go:147] configuring containerd to use "systemd" as cgroup driver...
I1229 06:53:23.773122 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I1229 06:53:23.786753 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1229 06:53:23.800393 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1229 06:53:23.813395 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1229 06:53:23.826600 17440 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1229 06:53:23.840992 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1229 06:53:23.854488 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1229 06:53:23.869084 17440 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1229 06:53:23.882690 17440 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1229 06:53:23.894430 17440 command_runner.go:130] > net.bridge.bridge-nf-call-iptables = 1
I1229 06:53:23.894542 17440 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1229 06:53:23.912444 17440 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1229 06:53:24.139583 17440 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1229 06:53:24.191402 17440 start.go:496] detecting cgroup driver to use...
I1229 06:53:24.191457 17440 start.go:519] Kubernetes 1.35.0+ detected, using "systemd" cgroup driver
I1229 06:53:24.191521 17440 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1229 06:53:24.217581 17440 command_runner.go:130] > # /usr/lib/systemd/system/docker.service
I1229 06:53:24.217604 17440 command_runner.go:130] > [Unit]
I1229 06:53:24.217609 17440 command_runner.go:130] > Description=Docker Application Container Engine
I1229 06:53:24.217615 17440 command_runner.go:130] > Documentation=https://docs.docker.com
I1229 06:53:24.217626 17440 command_runner.go:130] > After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
I1229 06:53:24.217631 17440 command_runner.go:130] > Wants=network-online.target containerd.service
I1229 06:53:24.217635 17440 command_runner.go:130] > Requires=docker.socket
I1229 06:53:24.217638 17440 command_runner.go:130] > StartLimitBurst=3
I1229 06:53:24.217642 17440 command_runner.go:130] > StartLimitIntervalSec=60
I1229 06:53:24.217646 17440 command_runner.go:130] > [Service]
I1229 06:53:24.217649 17440 command_runner.go:130] > Type=notify
I1229 06:53:24.217653 17440 command_runner.go:130] > Restart=always
I1229 06:53:24.217660 17440 command_runner.go:130] > ExecStart=
I1229 06:53:24.217694 17440 command_runner.go:130] > ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
I1229 06:53:24.217710 17440 command_runner.go:130] > ExecReload=/bin/kill -s HUP $MAINPID
I1229 06:53:24.217748 17440 command_runner.go:130] > # Having non-zero Limit*s causes performance problems due to accounting overhead
I1229 06:53:24.217761 17440 command_runner.go:130] > # in the kernel. We recommend using cgroups to do container-local accounting.
I1229 06:53:24.217767 17440 command_runner.go:130] > LimitNOFILE=infinity
I1229 06:53:24.217782 17440 command_runner.go:130] > LimitNPROC=infinity
I1229 06:53:24.217790 17440 command_runner.go:130] > LimitCORE=infinity
I1229 06:53:24.217818 17440 command_runner.go:130] > # Uncomment TasksMax if your systemd version supports it.
I1229 06:53:24.217828 17440 command_runner.go:130] > # Only systemd 226 and above support this version.
I1229 06:53:24.217833 17440 command_runner.go:130] > TasksMax=infinity
I1229 06:53:24.217840 17440 command_runner.go:130] > # set delegate yes so that systemd does not reset the cgroups of docker containers
I1229 06:53:24.217847 17440 command_runner.go:130] > Delegate=yes
I1229 06:53:24.217855 17440 command_runner.go:130] > # kill only the docker process, not all processes in the cgroup
I1229 06:53:24.217864 17440 command_runner.go:130] > KillMode=process
I1229 06:53:24.217871 17440 command_runner.go:130] > OOMScoreAdjust=-500
I1229 06:53:24.217881 17440 command_runner.go:130] > [Install]
I1229 06:53:24.217896 17440 command_runner.go:130] > WantedBy=multi-user.target
I1229 06:53:24.217973 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1229 06:53:24.255457 17440 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1229 06:53:24.293449 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1229 06:53:24.313141 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1229 06:53:24.332090 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1229 06:53:24.359168 17440 command_runner.go:130] > runtime-endpoint: unix:///var/run/cri-dockerd.sock
I1229 06:53:24.359453 17440 ssh_runner.go:195] Run: which cri-dockerd
I1229 06:53:24.364136 17440 command_runner.go:130] > /usr/bin/cri-dockerd
I1229 06:53:24.364255 17440 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1229 06:53:24.377342 17440 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1229 06:53:24.400807 17440 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1229 06:53:24.632265 17440 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1229 06:53:24.860401 17440 docker.go:578] configuring docker to use "systemd" as cgroup driver...
I1229 06:53:24.860544 17440 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I1229 06:53:24.885002 17440 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1229 06:53:24.902479 17440 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1229 06:53:25.138419 17440 ssh_runner.go:195] Run: sudo systemctl restart docker
I1229 06:53:48.075078 17440 ssh_runner.go:235] Completed: sudo systemctl restart docker: (22.936617903s)
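With /etc/docker/daemon.json rewritten for the systemd cgroup driver (06:53:24.860) and Docker restarted, the effective driver can be confirmed from the guest with a one-off query (not something the harness runs):
  out/minikube-linux-amd64 -p functional-695625 ssh "sudo docker info --format '{{.CgroupDriver}}'"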
I1229 06:53:48.075181 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1229 06:53:48.109404 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1229 06:53:48.160259 17440 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I1229 06:53:48.213352 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1229 06:53:48.231311 17440 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1229 06:53:48.408709 17440 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1229 06:53:48.584722 17440 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1229 06:53:48.754219 17440 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1229 06:53:48.798068 17440 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1229 06:53:48.815248 17440 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1229 06:53:48.983637 17440 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1229 06:53:49.117354 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1229 06:53:49.139900 17440 start.go:553] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1229 06:53:49.139985 17440 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1229 06:53:49.146868 17440 command_runner.go:130] > File: /var/run/cri-dockerd.sock
I1229 06:53:49.146900 17440 command_runner.go:130] > Size: 0 Blocks: 0 IO Block: 4096 socket
I1229 06:53:49.146910 17440 command_runner.go:130] > Device: 0,23 Inode: 2092 Links: 1
I1229 06:53:49.146918 17440 command_runner.go:130] > Access: (0660/srw-rw----) Uid: ( 0/ root) Gid: ( 1000/ docker)
I1229 06:53:49.146926 17440 command_runner.go:130] > Access: 2025-12-29 06:53:49.121969518 +0000
I1229 06:53:49.146933 17440 command_runner.go:130] > Modify: 2025-12-29 06:53:48.995956445 +0000
I1229 06:53:49.146940 17440 command_runner.go:130] > Change: 2025-12-29 06:53:49.012958222 +0000
I1229 06:53:49.146947 17440 command_runner.go:130] > Birth: 2025-12-29 06:53:48.995956445 +0000
I1229 06:53:49.146986 17440 start.go:574] Will wait 60s for crictl version
I1229 06:53:49.147040 17440 ssh_runner.go:195] Run: which crictl
I1229 06:53:49.152717 17440 command_runner.go:130] > /usr/bin/crictl
I1229 06:53:49.152823 17440 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1229 06:53:49.184154 17440 command_runner.go:130] > Version: 0.1.0
I1229 06:53:49.184179 17440 command_runner.go:130] > RuntimeName: docker
I1229 06:53:49.184183 17440 command_runner.go:130] > RuntimeVersion: 28.5.2
I1229 06:53:49.184188 17440 command_runner.go:130] > RuntimeApiVersion: v1
I1229 06:53:49.184211 17440 start.go:590] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.2
RuntimeApiVersion: v1
I1229 06:53:49.184266 17440 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1229 06:53:49.212414 17440 command_runner.go:130] > 28.5.2
I1229 06:53:49.213969 17440 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1229 06:53:49.257526 17440 command_runner.go:130] > 28.5.2
I1229 06:53:49.262261 17440 out.go:252] * Preparing Kubernetes v1.35.0 on Docker 28.5.2 ...
I1229 06:53:49.266577 17440 main.go:144] libmachine: domain functional-695625 has defined MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:49.267255 17440 main.go:144] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:66:32:c1", ip: ""} in network mk-functional-695625: {Iface:virbr1 ExpiryTime:2025-12-29 07:52:21 +0000 UTC Type:0 Mac:52:54:00:66:32:c1 Iaid: IPaddr:192.168.39.121 Prefix:24 Hostname:functional-695625 Clientid:01:52:54:00:66:32:c1}
I1229 06:53:49.267298 17440 main.go:144] libmachine: domain functional-695625 has defined IP address 192.168.39.121 and MAC address 52:54:00:66:32:c1 in network mk-functional-695625
I1229 06:53:49.267633 17440 ssh_runner.go:195] Run: grep 192.168.39.1 host.minikube.internal$ /etc/hosts
I1229 06:53:49.286547 17440 command_runner.go:130] > 192.168.39.1 host.minikube.internal
I1229 06:53:49.286686 17440 kubeadm.go:884] updating cluster {Name:functional-695625 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22353/minikube-v1.37.0-1766979747-22353-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766979815-22353@sha256:20dad5895b49b986a1253c0faab60865204843ac97fd3a6e6210da5896244409 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1
.35.0 ClusterName:functional-695625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.121 Port:8441 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions
:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false} ...
I1229 06:53:49.286896 17440 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I1229 06:53:49.286965 17440 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1229 06:53:49.324994 17440 command_runner.go:130] > registry.k8s.io/kube-apiserver:v1.35.0
I1229 06:53:49.325029 17440 command_runner.go:130] > registry.k8s.io/kube-controller-manager:v1.35.0
I1229 06:53:49.325037 17440 command_runner.go:130] > registry.k8s.io/kube-proxy:v1.35.0
I1229 06:53:49.325045 17440 command_runner.go:130] > registry.k8s.io/kube-scheduler:v1.35.0
I1229 06:53:49.325052 17440 command_runner.go:130] > registry.k8s.io/etcd:3.6.6-0
I1229 06:53:49.325060 17440 command_runner.go:130] > registry.k8s.io/coredns/coredns:v1.13.1
I1229 06:53:49.325067 17440 command_runner.go:130] > registry.k8s.io/pause:3.10.1
I1229 06:53:49.325074 17440 command_runner.go:130] > gcr.io/k8s-minikube/storage-provisioner:v5
I1229 06:53:49.325113 17440 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.35.0
registry.k8s.io/kube-controller-manager:v1.35.0
registry.k8s.io/kube-proxy:v1.35.0
registry.k8s.io/kube-scheduler:v1.35.0
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1229 06:53:49.325127 17440 docker.go:624] Images already preloaded, skipping extraction
I1229 06:53:49.325191 17440 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1229 06:53:49.352256 17440 command_runner.go:130] > registry.k8s.io/kube-apiserver:v1.35.0
I1229 06:53:49.352294 17440 command_runner.go:130] > registry.k8s.io/kube-proxy:v1.35.0
I1229 06:53:49.352301 17440 command_runner.go:130] > registry.k8s.io/kube-scheduler:v1.35.0
I1229 06:53:49.352309 17440 command_runner.go:130] > registry.k8s.io/kube-controller-manager:v1.35.0
I1229 06:53:49.352315 17440 command_runner.go:130] > registry.k8s.io/etcd:3.6.6-0
I1229 06:53:49.352323 17440 command_runner.go:130] > registry.k8s.io/coredns/coredns:v1.13.1
I1229 06:53:49.352349 17440 command_runner.go:130] > registry.k8s.io/pause:3.10.1
I1229 06:53:49.352361 17440 command_runner.go:130] > gcr.io/k8s-minikube/storage-provisioner:v5
I1229 06:53:49.352398 17440 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.35.0
registry.k8s.io/kube-proxy:v1.35.0
registry.k8s.io/kube-scheduler:v1.35.0
registry.k8s.io/kube-controller-manager:v1.35.0
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1229 06:53:49.352412 17440 cache_images.go:86] Images are preloaded, skipping loading
I1229 06:53:49.352427 17440 kubeadm.go:935] updating node { 192.168.39.121 8441 v1.35.0 docker true true} ...
I1229 06:53:49.352542 17440 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.35.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=functional-695625 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.39.121
[Install]
config:
{KubernetesVersion:v1.35.0 ClusterName:functional-695625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1229 06:53:49.352611 17440 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1229 06:53:49.466471 17440 command_runner.go:130] > systemd
I1229 06:53:49.469039 17440 cni.go:84] Creating CNI manager for ""
I1229 06:53:49.469084 17440 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1229 06:53:49.469108 17440 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1229 06:53:49.469137 17440 kubeadm.go:197] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.39.121 APIServerPort:8441 KubernetesVersion:v1.35.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-695625 NodeName:functional-695625 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.39.121"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.39.121 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPa
th:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1229 06:53:49.469275 17440 kubeadm.go:203] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.39.121
bindPort: 8441
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "functional-695625"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.39.121"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.39.121"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8441
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.35.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1229 06:53:49.469338 17440 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.35.0
I1229 06:53:49.495545 17440 command_runner.go:130] > kubeadm
I1229 06:53:49.495573 17440 command_runner.go:130] > kubectl
I1229 06:53:49.495580 17440 command_runner.go:130] > kubelet
I1229 06:53:49.495602 17440 binaries.go:51] Found k8s binaries, skipping transfer
I1229 06:53:49.495647 17440 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1229 06:53:49.521658 17440 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (318 bytes)
I1229 06:53:49.572562 17440 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1229 06:53:49.658210 17440 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2223 bytes)
I1229 06:53:49.740756 17440 ssh_runner.go:195] Run: grep 192.168.39.121 control-plane.minikube.internal$ /etc/hosts
I1229 06:53:49.746333 17440 command_runner.go:130] > 192.168.39.121 control-plane.minikube.internal
I1229 06:53:49.746402 17440 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1229 06:53:50.073543 17440 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1229 06:53:50.148789 17440 certs.go:69] Setting up /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625 for IP: 192.168.39.121
I1229 06:53:50.148837 17440 certs.go:195] generating shared ca certs ...
I1229 06:53:50.148860 17440 certs.go:227] acquiring lock for ca certs: {Name:mke00d9bdd9ac6280bcf2843fe76ff41695d9199 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1229 06:53:50.149082 17440 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22353-9552/.minikube/ca.key
I1229 06:53:50.149152 17440 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22353-9552/.minikube/proxy-client-ca.key
I1229 06:53:50.149169 17440 certs.go:257] generating profile certs ...
I1229 06:53:50.149320 17440 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/client.key
I1229 06:53:50.149413 17440 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/apiserver.key.a4651613
I1229 06:53:50.149478 17440 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/proxy-client.key
I1229 06:53:50.149490 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I1229 06:53:50.149508 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I1229 06:53:50.149525 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I1229 06:53:50.149541 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I1229 06:53:50.149556 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I1229 06:53:50.149573 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I1229 06:53:50.149588 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I1229 06:53:50.149607 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I1229 06:53:50.149673 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/13486.pem (1338 bytes)
W1229 06:53:50.149723 17440 certs.go:480] ignoring /home/jenkins/minikube-integration/22353-9552/.minikube/certs/13486_empty.pem, impossibly tiny 0 bytes
I1229 06:53:50.149738 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca-key.pem (1679 bytes)
I1229 06:53:50.149776 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/ca.pem (1082 bytes)
I1229 06:53:50.149837 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/cert.pem (1123 bytes)
I1229 06:53:50.149873 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/key.pem (1675 bytes)
I1229 06:53:50.149950 17440 certs.go:484] found cert: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem (1708 bytes)
I1229 06:53:50.150003 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:50.150023 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/certs/13486.pem -> /usr/share/ca-certificates/13486.pem
I1229 06:53:50.150038 17440 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem -> /usr/share/ca-certificates/134862.pem
I1229 06:53:50.150853 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1229 06:53:50.233999 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1229 06:53:50.308624 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1229 06:53:50.436538 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1229 06:53:50.523708 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1229 06:53:50.633239 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1229 06:53:50.746852 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1229 06:53:50.793885 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1229 06:53:50.894956 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1229 06:53:50.955149 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/certs/13486.pem --> /usr/share/ca-certificates/13486.pem (1338 bytes)
I1229 06:53:51.018694 17440 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22353-9552/.minikube/files/etc/ssl/certs/134862.pem --> /usr/share/ca-certificates/134862.pem (1708 bytes)
I1229 06:53:51.084938 17440 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (722 bytes)
I1229 06:53:51.127238 17440 ssh_runner.go:195] Run: openssl version
I1229 06:53:51.136812 17440 command_runner.go:130] > OpenSSL 3.4.1 11 Feb 2025 (Library: OpenSSL 3.4.1 11 Feb 2025)
I1229 06:53:51.136914 17440 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:51.154297 17440 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem
I1229 06:53:51.175503 17440 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:51.182560 17440 command_runner.go:130] > -rw-r--r-- 1 root root 1111 Dec 29 06:46 /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:51.182600 17440 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 29 06:46 /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:51.182653 17440 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1229 06:53:51.195355 17440 command_runner.go:130] > b5213941
I1229 06:53:51.195435 17440 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/b5213941.0
I1229 06:53:51.217334 17440 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/13486.pem
I1229 06:53:51.233542 17440 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/13486.pem /etc/ssl/certs/13486.pem
I1229 06:53:51.248778 17440 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13486.pem
I1229 06:53:51.255758 17440 command_runner.go:130] > -rw-r--r-- 1 root root 1338 Dec 29 06:52 /usr/share/ca-certificates/13486.pem
I1229 06:53:51.255826 17440 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Dec 29 06:52 /usr/share/ca-certificates/13486.pem
I1229 06:53:51.255874 17440 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13486.pem
I1229 06:53:51.272983 17440 command_runner.go:130] > 51391683
I1229 06:53:51.273077 17440 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/51391683.0
I1229 06:53:51.303911 17440 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/134862.pem
I1229 06:53:51.325828 17440 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/134862.pem /etc/ssl/certs/134862.pem
I1229 06:53:51.347788 17440 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/134862.pem
I1229 06:53:51.360429 17440 command_runner.go:130] > -rw-r--r-- 1 root root 1708 Dec 29 06:52 /usr/share/ca-certificates/134862.pem
I1229 06:53:51.360567 17440 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Dec 29 06:52 /usr/share/ca-certificates/134862.pem
I1229 06:53:51.360625 17440 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/134862.pem
I1229 06:53:51.369235 17440 command_runner.go:130] > 3ec20f2e
I1229 06:53:51.369334 17440 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/3ec20f2e.0
I1229 06:53:51.381517 17440 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1229 06:53:51.387517 17440 command_runner.go:130] > File: /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1229 06:53:51.387548 17440 command_runner.go:130] > Size: 1176 Blocks: 8 IO Block: 4096 regular file
I1229 06:53:51.387554 17440 command_runner.go:130] > Device: 253,1 Inode: 1052441 Links: 1
I1229 06:53:51.387560 17440 command_runner.go:130] > Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root)
I1229 06:53:51.387568 17440 command_runner.go:130] > Access: 2025-12-29 06:52:32.673454347 +0000
I1229 06:53:51.387572 17440 command_runner.go:130] > Modify: 2025-12-29 06:52:32.673454347 +0000
I1229 06:53:51.387577 17440 command_runner.go:130] > Change: 2025-12-29 06:52:32.673454347 +0000
I1229 06:53:51.387581 17440 command_runner.go:130] > Birth: 2025-12-29 06:52:32.673454347 +0000
I1229 06:53:51.387657 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1229 06:53:51.396600 17440 command_runner.go:130] > Certificate will not expire
I1229 06:53:51.397131 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1229 06:53:51.410180 17440 command_runner.go:130] > Certificate will not expire
I1229 06:53:51.410283 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1229 06:53:51.419062 17440 command_runner.go:130] > Certificate will not expire
I1229 06:53:51.419164 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1229 06:53:51.431147 17440 command_runner.go:130] > Certificate will not expire
I1229 06:53:51.431222 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1229 06:53:51.441881 17440 command_runner.go:130] > Certificate will not expire
I1229 06:53:51.442104 17440 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
I1229 06:53:51.450219 17440 command_runner.go:130] > Certificate will not expire
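Each "openssl x509 -checkend 86400" call above asks whether the certificate will still be valid 24 hours from now; "Certificate will not expire" means the restart can reuse the existing file. A minimal Go equivalent of that check (illustrative only, not minikube's code; the path is one of the certificates named in this log and requires read access to /var/lib/minikube/certs):

// certcheck.go: sketch of the -checkend 86400 semantics shown above.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func expiresWithin(path string, d time.Duration) (bool, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		return false, fmt.Errorf("%s: no PEM block found", path)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false, err
	}
	// openssl's -checkend N reports expiry when NotAfter falls before now+N seconds.
	return time.Now().Add(d).After(cert.NotAfter), nil
}

func main() {
	soon, err := expiresWithin("/var/lib/minikube/certs/apiserver-kubelet-client.crt", 24*time.Hour)
	if err != nil {
		fmt.Println(err)
		return
	}
	if soon {
		fmt.Println("Certificate will expire")
	} else {
		fmt.Println("Certificate will not expire")
	}
}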
I1229 06:53:51.450295 17440 kubeadm.go:401] StartCluster: {Name:functional-695625 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22353/minikube-v1.37.0-1766979747-22353-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1766979815-22353@sha256:20dad5895b49b986a1253c0faab60865204843ac97fd3a6e6210da5896244409 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35
.0 ClusterName:functional-695625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.121 Port:8441 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[]
MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I1229 06:53:51.450396 17440 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1229 06:53:51.474716 17440 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1229 06:53:51.489086 17440 command_runner.go:130] > /var/lib/kubelet/config.yaml
I1229 06:53:51.489107 17440 command_runner.go:130] > /var/lib/kubelet/kubeadm-flags.env
I1229 06:53:51.489113 17440 command_runner.go:130] > /var/lib/minikube/etcd:
I1229 06:53:51.489117 17440 command_runner.go:130] > member
I1229 06:53:51.489676 17440 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1229 06:53:51.489694 17440 kubeadm.go:598] restartPrimaryControlPlane start ...
I1229 06:53:51.489753 17440 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1229 06:53:51.503388 17440 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1229 06:53:51.503948 17440 kubeconfig.go:125] found "functional-695625" server: "https://192.168.39.121:8441"
I1229 06:53:51.504341 17440 loader.go:405] Config loaded from file: /home/jenkins/minikube-integration/22353-9552/kubeconfig
I1229 06:53:51.504505 17440 kapi.go:59] client config for functional-695625: &rest.Config{Host:"https://192.168.39.121:8441", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/client.crt", KeyFile:"/home/jenkins/minikube-integration/22353-9552/.minikube/profiles/functional-695625/client.key", CAFile:"/home/jenkins/minikube-integration/22353-9552/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), Nex
tProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x2780200), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I1229 06:53:51.504963 17440 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I1229 06:53:51.504986 17440 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
I1229 06:53:51.504992 17440 envvar.go:172] "Feature gate default state" feature="InOrderInformersBatchProcess" enabled=true
I1229 06:53:51.504998 17440 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=true
I1229 06:53:51.505004 17440 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=true
I1229 06:53:51.505012 17440 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I1229 06:53:51.505089 17440 cert_rotation.go:141] "Starting client certificate rotation controller" logger="tls-transport-cache"
I1229 06:53:51.505414 17440 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1229 06:53:51.521999 17440 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.39.121
I1229 06:53:51.522047 17440 kubeadm.go:1161] stopping kube-system containers ...
I1229 06:53:51.522115 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1229 06:53:51.550376 17440 command_runner.go:130] > 6f69ba6a1553
I1229 06:53:51.550407 17440 command_runner.go:130] > a014f32abcd0
I1229 06:53:51.550415 17440 command_runner.go:130] > d81259f64136
I1229 06:53:51.550422 17440 command_runner.go:130] > fb6db97d8ffe
I1229 06:53:51.550432 17440 command_runner.go:130] > 17fe16a2822a
I1229 06:53:51.550441 17440 command_runner.go:130] > a79d99ad3fde
I1229 06:53:51.550448 17440 command_runner.go:130] > 4ed279733477
I1229 06:53:51.550455 17440 command_runner.go:130] > 1fc5fa7d9295
I1229 06:53:51.550462 17440 command_runner.go:130] > 98261fa185f6
I1229 06:53:51.550470 17440 command_runner.go:130] > b046056ff071
I1229 06:53:51.550478 17440 command_runner.go:130] > b3cc8048f6d9
I1229 06:53:51.550485 17440 command_runner.go:130] > bd96b57aa9fc
I1229 06:53:51.550491 17440 command_runner.go:130] > 64853b50a6c5
I1229 06:53:51.550496 17440 command_runner.go:130] > bd7d900efd48
I1229 06:53:51.550505 17440 command_runner.go:130] > 8911777281f4
I1229 06:53:51.550511 17440 command_runner.go:130] > a123d63a8edb
I1229 06:53:51.550516 17440 command_runner.go:130] > 548561c7ada8
I1229 06:53:51.550521 17440 command_runner.go:130] > fd22eb0d6c14
I1229 06:53:51.550528 17440 command_runner.go:130] > 14aafc386533
I1229 06:53:51.550540 17440 command_runner.go:130] > abbe46bd960e
I1229 06:53:51.550548 17440 command_runner.go:130] > 4b032678478a
I1229 06:53:51.550556 17440 command_runner.go:130] > 0af491ef7c2f
I1229 06:53:51.550566 17440 command_runner.go:130] > 5024b03252e3
I1229 06:53:51.550572 17440 command_runner.go:130] > fe7b5da2f7fb
I1229 06:53:51.550582 17440 command_runner.go:130] > ad82b94f7629
I1229 06:53:51.552420 17440 docker.go:487] Stopping containers: [6f69ba6a1553 a014f32abcd0 d81259f64136 fb6db97d8ffe 17fe16a2822a a79d99ad3fde 4ed279733477 1fc5fa7d9295 98261fa185f6 b046056ff071 b3cc8048f6d9 bd96b57aa9fc 64853b50a6c5 bd7d900efd48 8911777281f4 a123d63a8edb 548561c7ada8 fd22eb0d6c14 14aafc386533 abbe46bd960e 4b032678478a 0af491ef7c2f 5024b03252e3 fe7b5da2f7fb ad82b94f7629]
I1229 06:53:51.552499 17440 ssh_runner.go:195] Run: docker stop 6f69ba6a1553 a014f32abcd0 d81259f64136 fb6db97d8ffe 17fe16a2822a a79d99ad3fde 4ed279733477 1fc5fa7d9295 98261fa185f6 b046056ff071 b3cc8048f6d9 bd96b57aa9fc 64853b50a6c5 bd7d900efd48 8911777281f4 a123d63a8edb 548561c7ada8 fd22eb0d6c14 14aafc386533 abbe46bd960e 4b032678478a 0af491ef7c2f 5024b03252e3 fe7b5da2f7fb ad82b94f7629
I1229 06:53:51.976888 17440 command_runner.go:130] > 6f69ba6a1553
I1229 06:53:51.976911 17440 command_runner.go:130] > a014f32abcd0
I1229 06:53:58.789216 17440 command_runner.go:130] > d81259f64136
I1229 06:53:58.789240 17440 command_runner.go:130] > fb6db97d8ffe
I1229 06:53:58.789248 17440 command_runner.go:130] > 17fe16a2822a
I1229 06:53:58.789252 17440 command_runner.go:130] > a79d99ad3fde
I1229 06:53:58.789256 17440 command_runner.go:130] > 4ed279733477
I1229 06:53:58.789259 17440 command_runner.go:130] > 1fc5fa7d9295
I1229 06:53:58.789262 17440 command_runner.go:130] > 98261fa185f6
I1229 06:53:58.789266 17440 command_runner.go:130] > b046056ff071
I1229 06:53:58.789269 17440 command_runner.go:130] > b3cc8048f6d9
I1229 06:53:58.789272 17440 command_runner.go:130] > bd96b57aa9fc
I1229 06:53:58.789275 17440 command_runner.go:130] > 64853b50a6c5
I1229 06:53:58.789278 17440 command_runner.go:130] > bd7d900efd48
I1229 06:53:58.789281 17440 command_runner.go:130] > 8911777281f4
I1229 06:53:58.789284 17440 command_runner.go:130] > a123d63a8edb
I1229 06:53:58.789287 17440 command_runner.go:130] > 548561c7ada8
I1229 06:53:58.789295 17440 command_runner.go:130] > fd22eb0d6c14
I1229 06:53:58.789299 17440 command_runner.go:130] > 14aafc386533
I1229 06:53:58.789303 17440 command_runner.go:130] > abbe46bd960e
I1229 06:53:58.789306 17440 command_runner.go:130] > 4b032678478a
I1229 06:53:58.789310 17440 command_runner.go:130] > 0af491ef7c2f
I1229 06:53:58.789314 17440 command_runner.go:130] > 5024b03252e3
I1229 06:53:58.789317 17440 command_runner.go:130] > fe7b5da2f7fb
I1229 06:53:58.789321 17440 command_runner.go:130] > ad82b94f7629
I1229 06:53:58.790986 17440 ssh_runner.go:235] Completed: docker stop 6f69ba6a1553 a014f32abcd0 d81259f64136 fb6db97d8ffe 17fe16a2822a a79d99ad3fde 4ed279733477 1fc5fa7d9295 98261fa185f6 b046056ff071 b3cc8048f6d9 bd96b57aa9fc 64853b50a6c5 bd7d900efd48 8911777281f4 a123d63a8edb 548561c7ada8 fd22eb0d6c14 14aafc386533 abbe46bd960e 4b032678478a 0af491ef7c2f 5024b03252e3 fe7b5da2f7fb ad82b94f7629: (7.238443049s)
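cri-dockerd names containers k8s_<container>_<pod>_<namespace>_<uid>_<attempt>, which is why the name=k8s_.*_(kube-system)_ filter above selects every kube-system container regardless of state. A rough Go sketch of the same list-then-stop step (illustrative only; it shells out to the docker CLI exactly as the log does):

// stopkube.go: sketch of the container-stop step above, not minikube's code.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// List the IDs of all kube-system containers, running or exited.
	out, err := exec.Command("docker", "ps", "-a",
		"--filter", "name=k8s_.*_(kube-system)_", "--format", "{{.ID}}").Output()
	if err != nil {
		panic(err)
	}
	ids := strings.Fields(string(out))
	if len(ids) == 0 {
		fmt.Println("nothing to stop")
		return
	}
	// Stop them all in a single invocation, as the log above does.
	args := append([]string{"stop"}, ids...)
	if err := exec.Command("docker", args...).Run(); err != nil {
		panic(err)
	}
	fmt.Printf("stopped %d containers\n", len(ids))
}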
I1229 06:53:58.791057 17440 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1229 06:53:58.833953 17440 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1229 06:53:58.857522 17440 command_runner.go:130] > -rw------- 1 root root 5635 Dec 29 06:52 /etc/kubernetes/admin.conf
I1229 06:53:58.857550 17440 command_runner.go:130] > -rw------- 1 root root 5638 Dec 29 06:52 /etc/kubernetes/controller-manager.conf
I1229 06:53:58.857561 17440 command_runner.go:130] > -rw------- 1 root root 1974 Dec 29 06:52 /etc/kubernetes/kubelet.conf
I1229 06:53:58.857571 17440 command_runner.go:130] > -rw------- 1 root root 5590 Dec 29 06:52 /etc/kubernetes/scheduler.conf
I1229 06:53:58.857610 17440 kubeadm.go:158] found existing configuration files:
-rw------- 1 root root 5635 Dec 29 06:52 /etc/kubernetes/admin.conf
-rw------- 1 root root 5638 Dec 29 06:52 /etc/kubernetes/controller-manager.conf
-rw------- 1 root root 1974 Dec 29 06:52 /etc/kubernetes/kubelet.conf
-rw------- 1 root root 5590 Dec 29 06:52 /etc/kubernetes/scheduler.conf
I1229 06:53:58.857671 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I1229 06:53:58.875294 17440 command_runner.go:130] > server: https://control-plane.minikube.internal:8441
I1229 06:53:58.876565 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I1229 06:53:58.896533 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 1
stdout:
stderr:
I1229 06:53:58.896617 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1229 06:53:58.917540 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I1229 06:53:58.936703 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 1
stdout:
stderr:
I1229 06:53:58.936777 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1229 06:53:58.957032 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I1229 06:53:58.970678 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 1
stdout:
stderr:
I1229 06:53:58.970742 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1229 06:53:58.992773 17440 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1229 06:53:59.007767 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1229 06:53:59.061402 17440 command_runner.go:130] > [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1229 06:53:59.061485 17440 command_runner.go:130] > [certs] Using existing ca certificate authority
I1229 06:53:59.061525 17440 command_runner.go:130] > [certs] Using existing apiserver certificate and key on disk
I1229 06:53:59.061923 17440 command_runner.go:130] > [certs] Using existing apiserver-kubelet-client certificate and key on disk
I1229 06:53:59.062217 17440 command_runner.go:130] > [certs] Using existing front-proxy-ca certificate authority
I1229 06:53:59.062329 17440 command_runner.go:130] > [certs] Using existing front-proxy-client certificate and key on disk
I1229 06:53:59.062606 17440 command_runner.go:130] > [certs] Using existing etcd/ca certificate authority
I1229 06:53:59.062852 17440 command_runner.go:130] > [certs] Using existing etcd/server certificate and key on disk
I1229 06:53:59.062948 17440 command_runner.go:130] > [certs] Using existing etcd/peer certificate and key on disk
I1229 06:53:59.063179 17440 command_runner.go:130] > [certs] Using existing etcd/healthcheck-client certificate and key on disk
I1229 06:53:59.063370 17440 command_runner.go:130] > [certs] Using existing apiserver-etcd-client certificate and key on disk
I1229 06:53:59.063615 17440 command_runner.go:130] > [certs] Using the existing "sa" key
I1229 06:53:59.066703 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1229 06:53:59.686012 17440 command_runner.go:130] > [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1229 06:53:59.686050 17440 command_runner.go:130] > [kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
I1229 06:53:59.686059 17440 command_runner.go:130] > [kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/super-admin.conf"
I1229 06:53:59.686069 17440 command_runner.go:130] > [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1229 06:53:59.686078 17440 command_runner.go:130] > [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1229 06:53:59.686087 17440 command_runner.go:130] > [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1229 06:53:59.686203 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1229 06:53:59.995495 17440 command_runner.go:130] > [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1229 06:53:59.995529 17440 command_runner.go:130] > [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1229 06:53:59.995539 17440 command_runner.go:130] > [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1229 06:53:59.995545 17440 command_runner.go:130] > [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1229 06:53:59.995549 17440 command_runner.go:130] > [kubelet-start] Starting the kubelet
I1229 06:53:59.995615 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1229 06:54:00.047957 17440 command_runner.go:130] > [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1229 06:54:00.047983 17440 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-apiserver"
I1229 06:54:00.053966 17440 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1229 06:54:00.056537 17440 command_runner.go:130] > [control-plane] Creating static Pod manifest for "kube-scheduler"
I1229 06:54:00.059558 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I1229 06:54:00.175745 17440 command_runner.go:130] > [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1229 06:54:00.175825 17440 api_server.go:52] waiting for apiserver process to appear ...
I1229 06:54:00.175893 17440 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1229 06:54:00.233895 17440 command_runner.go:130] > 2416
I1229 06:54:00.233940 17440 api_server.go:72] duration metric: took 58.126409ms to wait for apiserver process to appear ...
I1229 06:54:00.233953 17440 api_server.go:88] waiting for apiserver healthz status ...
I1229 06:54:00.233976 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:05.236821 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:05.236865 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:10.239922 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:10.239956 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:15.242312 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:15.242347 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:20.245667 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:20.245726 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:25.248449 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:25.248501 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:30.249241 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:30.249279 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:35.251737 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:35.251771 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:40.254366 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:40.254407 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:45.257232 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:45.257275 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:50.259644 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:50.259685 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:54:55.261558 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:54:55.261592 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:55:00.263123 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
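The repeated "stopped: ... context deadline exceeded" lines are the health-check loop timing out: each probe is a GET to https://192.168.39.121:8441/healthz with a short per-request timeout, retried until the apiserver answers or an overall deadline expires. Below is a self-contained Go sketch of that polling pattern, not minikube's actual api_server.go; the 5s/4m figures and the skipped TLS verification are assumptions made to keep it runnable.

// healthzpoll.go: illustrative apiserver /healthz polling loop.
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func waitForHealthz(url string, perRequest, overall time.Duration) error {
	client := &http.Client{
		Timeout: perRequest,
		// minikube verifies against its own CA; skipping verification here
		// just keeps the sketch self-contained.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(overall)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Printf("healthz: %s\n", body)
				return nil
			}
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("apiserver did not become healthy within %s", overall)
}

func main() {
	if err := waitForHealthz("https://192.168.39.121:8441/healthz", 5*time.Second, 4*time.Minute); err != nil {
		fmt.Println(err)
	}
}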
I1229 06:55:00.263241 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
I1229 06:55:00.287429 17440 command_runner.go:130] > fb6db97d8ffe
I1229 06:55:00.288145 17440 logs.go:282] 1 containers: [fb6db97d8ffe]
I1229 06:55:00.288289 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
I1229 06:55:00.310519 17440 command_runner.go:130] > d81259f64136
I1229 06:55:00.310561 17440 logs.go:282] 1 containers: [d81259f64136]
I1229 06:55:00.310630 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
I1229 06:55:00.334579 17440 command_runner.go:130] > 6f69ba6a1553
I1229 06:55:00.334624 17440 logs.go:282] 1 containers: [6f69ba6a1553]
I1229 06:55:00.334692 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
I1229 06:55:00.353472 17440 command_runner.go:130] > 4d49952084c9
I1229 06:55:00.353503 17440 command_runner.go:130] > a79d99ad3fde
I1229 06:55:00.354626 17440 logs.go:282] 2 containers: [4d49952084c9 a79d99ad3fde]
I1229 06:55:00.354714 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
I1229 06:55:00.376699 17440 command_runner.go:130] > 8911777281f4
I1229 06:55:00.378105 17440 logs.go:282] 1 containers: [8911777281f4]
I1229 06:55:00.378188 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
I1229 06:55:00.397976 17440 command_runner.go:130] > 17fe16a2822a
I1229 06:55:00.399617 17440 logs.go:282] 1 containers: [17fe16a2822a]
I1229 06:55:00.399707 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
I1229 06:55:00.419591 17440 logs.go:282] 0 containers: []
W1229 06:55:00.419617 17440 logs.go:284] No container was found matching "kindnet"
I1229 06:55:00.419665 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
I1229 06:55:00.440784 17440 command_runner.go:130] > bd96b57aa9fc
I1229 06:55:00.441985 17440 logs.go:282] 1 containers: [bd96b57aa9fc]
I1229 06:55:00.442020 17440 logs.go:123] Gathering logs for kube-controller-manager [17fe16a2822a] ...
I1229 06:55:00.442030 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 17fe16a2822a"
I1229 06:55:00.465151 17440 command_runner.go:130] ! I1229 06:53:51.283329 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:55:00.465192 17440 command_runner.go:130] ! I1229 06:53:51.303666 1 controllermanager.go:189] "Starting" version="v1.35.0"
I1229 06:55:00.465226 17440 command_runner.go:130] ! I1229 06:53:51.303706 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:55:00.465237 17440 command_runner.go:130] ! I1229 06:53:51.307865 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1229 06:55:00.465255 17440 command_runner.go:130] ! I1229 06:53:51.308287 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:55:00.465271 17440 command_runner.go:130] ! I1229 06:53:51.309479 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1229 06:55:00.465285 17440 command_runner.go:130] ! I1229 06:53:51.309545 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:55:00.465823 17440 logs.go:123] Gathering logs for coredns [6f69ba6a1553] ...
I1229 06:55:00.465845 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 6f69ba6a1553"
I1229 06:55:00.487618 17440 logs.go:123] Gathering logs for kube-scheduler [4d49952084c9] ...
I1229 06:55:00.487646 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 4d49952084c9"
I1229 06:55:00.508432 17440 command_runner.go:130] ! I1229 06:53:52.882050 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:55:00.508468 17440 command_runner.go:130] ! W1229 06:54:52.896472 1 authentication.go:397] Error looking up in-cluster authentication configuration: the server was unable to return a response in the time allotted, but may still be processing the request (get configmaps extension-apiserver-authentication)
I1229 06:55:00.508482 17440 command_runner.go:130] ! W1229 06:54:52.896499 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
I1229 06:55:00.508508 17440 command_runner.go:130] ! W1229 06:54:52.896506 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1229 06:55:00.508521 17440 command_runner.go:130] ! I1229 06:54:52.913597 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.35.0"
I1229 06:55:00.508529 17440 command_runner.go:130] ! I1229 06:54:52.913622 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:55:00.508541 17440 command_runner.go:130] ! I1229 06:54:52.915784 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1229 06:55:00.508551 17440 command_runner.go:130] ! I1229 06:54:52.915816 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:55:00.508560 17440 command_runner.go:130] ! I1229 06:54:52.915823 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1229 06:55:00.508568 17440 command_runner.go:130] ! I1229 06:54:52.915940 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:55:00.510308 17440 logs.go:123] Gathering logs for kube-scheduler [a79d99ad3fde] ...
I1229 06:55:00.510337 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a79d99ad3fde"
I1229 06:55:00.531862 17440 command_runner.go:130] ! I1229 06:53:51.269699 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:55:00.532900 17440 logs.go:123] Gathering logs for kube-proxy [8911777281f4] ...
I1229 06:55:00.532924 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 8911777281f4"
I1229 06:55:00.554051 17440 command_runner.go:130] ! I1229 06:52:47.703648 1 server_linux.go:53] "Using iptables proxy"
I1229 06:55:00.554084 17440 command_runner.go:130] ! I1229 06:52:47.791676 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:55:00.554095 17440 command_runner.go:130] ! I1229 06:52:47.897173 1 shared_informer.go:377] "Caches are synced"
I1229 06:55:00.554109 17440 command_runner.go:130] ! I1229 06:52:47.900073 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.39.121"]
I1229 06:55:00.554131 17440 command_runner.go:130] ! E1229 06:52:47.906310 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1229 06:55:00.554148 17440 command_runner.go:130] ! I1229 06:52:48.206121 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
I1229 06:55:00.554170 17440 command_runner.go:130] ! error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
I1229 06:55:00.554189 17440 command_runner.go:130] ! Perhaps ip6tables or your kernel needs to be upgraded.
I1229 06:55:00.554195 17440 command_runner.go:130] ! >
I1229 06:55:00.554208 17440 command_runner.go:130] ! I1229 06:52:48.209509 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1229 06:55:00.554224 17440 command_runner.go:130] ! I1229 06:52:48.210145 1 server_linux.go:136] "Using iptables Proxier"
I1229 06:55:00.554250 17440 command_runner.go:130] ! I1229 06:52:48.253805 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1229 06:55:00.554261 17440 command_runner.go:130] ! I1229 06:52:48.255046 1 server.go:529] "Version info" version="v1.35.0"
I1229 06:55:00.554273 17440 command_runner.go:130] ! I1229 06:52:48.255076 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:55:00.554316 17440 command_runner.go:130] ! I1229 06:52:48.262205 1 config.go:200] "Starting service config controller"
I1229 06:55:00.554327 17440 command_runner.go:130] ! I1229 06:52:48.262238 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1229 06:55:00.554339 17440 command_runner.go:130] ! I1229 06:52:48.262258 1 config.go:106] "Starting endpoint slice config controller"
I1229 06:55:00.554350 17440 command_runner.go:130] ! I1229 06:52:48.262261 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1229 06:55:00.554366 17440 command_runner.go:130] ! I1229 06:52:48.262278 1 config.go:403] "Starting serviceCIDR config controller"
I1229 06:55:00.554381 17440 command_runner.go:130] ! I1229 06:52:48.262282 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1229 06:55:00.554390 17440 command_runner.go:130] ! I1229 06:52:48.270608 1 config.go:309] "Starting node config controller"
I1229 06:55:00.554402 17440 command_runner.go:130] ! I1229 06:52:48.271311 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1229 06:55:00.554414 17440 command_runner.go:130] ! I1229 06:52:48.271337 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1229 06:55:00.554427 17440 command_runner.go:130] ! I1229 06:52:48.363324 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1229 06:55:00.554437 17440 command_runner.go:130] ! I1229 06:52:48.363427 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1229 06:55:00.554452 17440 command_runner.go:130] ! I1229 06:52:48.363671 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1229 06:55:00.556555 17440 logs.go:123] Gathering logs for storage-provisioner [bd96b57aa9fc] ...
I1229 06:55:00.556578 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 bd96b57aa9fc"
I1229 06:55:00.581812 17440 command_runner.go:130] ! I1229 06:52:48.539098 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1229 06:55:00.581848 17440 command_runner.go:130] ! I1229 06:52:48.550309 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1229 06:55:00.581857 17440 command_runner.go:130] ! I1229 06:52:48.550373 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1229 06:55:00.581865 17440 command_runner.go:130] ! W1229 06:52:48.552935 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581874 17440 command_runner.go:130] ! W1229 06:52:48.563735 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581881 17440 command_runner.go:130] ! I1229 06:52:48.564362 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1229 06:55:00.581890 17440 command_runner.go:130] ! I1229 06:52:48.565422 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:55:00.581911 17440 command_runner.go:130] ! I1229 06:52:48.565143 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"cfb65e16-c2f7-4c19-a059-8ef64f8f3f2e", APIVersion:"v1", ResourceVersion:"388", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868 became leader
I1229 06:55:00.581919 17440 command_runner.go:130] ! W1229 06:52:48.576668 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581930 17440 command_runner.go:130] ! W1229 06:52:48.582743 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581942 17440 command_runner.go:130] ! I1229 06:52:48.665711 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:55:00.581949 17440 command_runner.go:130] ! W1229 06:52:50.588204 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581957 17440 command_runner.go:130] ! W1229 06:52:50.593815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581964 17440 command_runner.go:130] ! W1229 06:52:52.597431 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581975 17440 command_runner.go:130] ! W1229 06:52:52.602815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581985 17440 command_runner.go:130] ! W1229 06:52:54.606663 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.581993 17440 command_runner.go:130] ! W1229 06:52:54.612650 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582003 17440 command_runner.go:130] ! W1229 06:52:56.616395 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582010 17440 command_runner.go:130] ! W1229 06:52:56.622404 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582020 17440 command_runner.go:130] ! W1229 06:52:58.626804 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582030 17440 command_runner.go:130] ! W1229 06:52:58.637257 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582037 17440 command_runner.go:130] ! W1229 06:53:00.640728 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582044 17440 command_runner.go:130] ! W1229 06:53:00.646446 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582051 17440 command_runner.go:130] ! W1229 06:53:02.650659 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582070 17440 command_runner.go:130] ! W1229 06:53:02.660374 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582080 17440 command_runner.go:130] ! W1229 06:53:04.664091 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582088 17440 command_runner.go:130] ! W1229 06:53:04.669806 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582097 17440 command_runner.go:130] ! W1229 06:53:06.674203 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582105 17440 command_runner.go:130] ! W1229 06:53:06.680002 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582115 17440 command_runner.go:130] ! W1229 06:53:08.683483 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582125 17440 command_runner.go:130] ! W1229 06:53:08.688934 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582141 17440 command_runner.go:130] ! W1229 06:53:10.693644 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582152 17440 command_runner.go:130] ! W1229 06:53:10.706122 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582160 17440 command_runner.go:130] ! W1229 06:53:12.709949 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582170 17440 command_runner.go:130] ! W1229 06:53:12.715753 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582177 17440 command_runner.go:130] ! W1229 06:53:14.719191 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582186 17440 command_runner.go:130] ! W1229 06:53:14.728100 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582193 17440 command_runner.go:130] ! W1229 06:53:16.731658 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582203 17440 command_runner.go:130] ! W1229 06:53:16.737463 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582211 17440 command_runner.go:130] ! W1229 06:53:18.741304 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582221 17440 command_runner.go:130] ! W1229 06:53:18.746708 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582228 17440 command_runner.go:130] ! W1229 06:53:20.749662 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582235 17440 command_runner.go:130] ! W1229 06:53:20.755989 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582242 17440 command_runner.go:130] ! W1229 06:53:22.760815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582252 17440 command_runner.go:130] ! W1229 06:53:22.772421 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582261 17440 command_runner.go:130] ! W1229 06:53:24.776403 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582269 17440 command_runner.go:130] ! W1229 06:53:24.783232 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582276 17440 command_runner.go:130] ! W1229 06:53:26.786665 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582287 17440 command_runner.go:130] ! W1229 06:53:26.792239 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582294 17440 command_runner.go:130] ! W1229 06:53:28.796420 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582302 17440 command_runner.go:130] ! W1229 06:53:28.805511 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582312 17440 command_runner.go:130] ! W1229 06:53:30.808544 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582319 17440 command_runner.go:130] ! W1229 06:53:30.816066 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582329 17440 command_runner.go:130] ! W1229 06:53:32.820090 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582336 17440 command_runner.go:130] ! W1229 06:53:32.826208 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582346 17440 command_runner.go:130] ! W1229 06:53:34.829865 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582353 17440 command_runner.go:130] ! W1229 06:53:34.835774 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582363 17440 command_runner.go:130] ! W1229 06:53:36.839291 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582370 17440 command_runner.go:130] ! W1229 06:53:36.853251 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582378 17440 command_runner.go:130] ! W1229 06:53:38.856432 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.582385 17440 command_runner.go:130] ! W1229 06:53:38.862360 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:55:00.586872 17440 logs.go:123] Gathering logs for Docker ...
I1229 06:55:00.586916 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I1229 06:55:00.609702 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:55:00.609731 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:55:00.609766 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:55:00.609784 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:55:00.609811 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=fatal msg="failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.609822 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Main process exited, code=exited, status=1/FAILURE
I1229 06:55:00.609831 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Failed with result 'exit-code'.
I1229 06:55:00.609842 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Failed to start CRI Interface for Docker Application Container Engine.
I1229 06:55:00.609848 17440 command_runner.go:130] > Dec 29 06:52:22 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:55:00.609857 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.020462163Z" level=info msg="Starting up"
I1229 06:55:00.609865 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.027928346Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:55:00.609879 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028129610Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:55:00.609890 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028144703Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:55:00.609906 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.043277940Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:55:00.609915 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.068992169Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:55:00.609923 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.093451498Z" level=info msg="Loading containers: start."
I1229 06:55:00.609943 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.245820420Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:55:00.609954 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.354124488Z" level=info msg="Loading containers: done."
I1229 06:55:00.609966 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.369556904Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:55:00.609976 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.370022229Z" level=info msg="Initializing buildkit"
I1229 06:55:00.609983 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: cri-docker.service: Scheduled restart job, restart counter is at 1.
I1229 06:55:00.609990 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:55:00.609998 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.429481151Z" level=info msg="Completed buildkit initialization"
I1229 06:55:00.610006 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437135480Z" level=info msg="Daemon has completed initialization"
I1229 06:55:00.610016 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437228150Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:55:00.610024 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437499736Z" level=info msg="API listen on /run/docker.sock"
I1229 06:55:00.610041 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437545942Z" level=info msg="API listen on [::]:2376"
I1229 06:55:00.610050 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:55:00.610070 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:55:00.610082 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:55:00.610091 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:55:00.610100 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:55:00.610107 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Hairpin mode is set to none"
I1229 06:55:00.610115 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Loaded network plugin cni"
I1229 06:55:00.610123 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:55:00.610131 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:55:00.610141 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:55:00.610152 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:55:00.610159 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:55:00.610168 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:55:00.610179 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250034276Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:55:00.610191 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250065025Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:55:00.610203 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250432086Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:55:00.610216 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250448972Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:55:00.610223 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:55:00.610231 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.004793725Z" level=info msg="Processing signal 'terminated'"
I1229 06:55:00.610242 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006609373Z" level=warning msg="Error while testing if containerd API is ready" error="Canceled: grpc: the client connection is closing"
I1229 06:55:00.610251 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006865498Z" level=info msg="Daemon shutdown complete"
I1229 06:55:00.610258 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:55:00.610265 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:55:00.610271 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:55:00.610281 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.072059214Z" level=info msg="Starting up"
I1229 06:55:00.610290 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079212056Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:55:00.610303 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079317481Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:55:00.610323 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079333267Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:55:00.610335 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.100712562Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:55:00.610345 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.111060819Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:55:00.610355 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.125644752Z" level=info msg="Loading containers: start."
I1229 06:55:00.610374 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.269806698Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:55:00.610384 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.403684326Z" level=info msg="Loading containers: done."
I1229 06:55:00.610394 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419740189Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:55:00.610404 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419840379Z" level=info msg="Initializing buildkit"
I1229 06:55:00.610412 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.440865810Z" level=info msg="Completed buildkit initialization"
I1229 06:55:00.610422 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.450796825Z" level=info msg="Daemon has completed initialization"
I1229 06:55:00.610429 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451233366Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:55:00.610439 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451367379Z" level=info msg="API listen on /run/docker.sock"
I1229 06:55:00.610447 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451393479Z" level=info msg="API listen on [::]:2376"
I1229 06:55:00.610455 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:55:00.610461 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:55:00.610470 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:55:00.610476 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:55:00.610483 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:55:00.610491 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:55:00.610500 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:55:00.610508 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:55:00.610516 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:55:00.610523 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Loaded network plugin cni"
I1229 06:55:00.610531 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:55:00.610538 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:55:00.610550 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:55:00.610559 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:55:00.610567 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:55:00.610573 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:55:00.610579 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:55:00.610595 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.014018901Z" level=info msg="Processing signal 'terminated'"
I1229 06:55:00.610607 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.015980570Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
I1229 06:55:00.610615 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.016658114Z" level=info msg="Daemon shutdown complete"
I1229 06:55:00.610622 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:55:00.610630 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:55:00.610637 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:55:00.610644 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.080172805Z" level=info msg="Starting up"
I1229 06:55:00.610653 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087153730Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:55:00.610669 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087606870Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:55:00.610680 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087791007Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:55:00.610692 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.102104328Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:55:00.610705 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.438808405Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:55:00.610713 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.712758412Z" level=info msg="Loading containers: start."
I1229 06:55:00.610735 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.850108278Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:55:00.610744 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.981771558Z" level=info msg="Loading containers: done."
I1229 06:55:00.610755 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997281457Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:55:00.610765 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997336373Z" level=info msg="Initializing buildkit"
I1229 06:55:00.610772 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.018270012Z" level=info msg="Completed buildkit initialization"
I1229 06:55:00.610781 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.027948102Z" level=info msg="Daemon has completed initialization"
I1229 06:55:00.610789 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028167710Z" level=info msg="API listen on /run/docker.sock"
I1229 06:55:00.610809 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028236879Z" level=info msg="API listen on [::]:2376"
I1229 06:55:00.610818 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028260561Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:55:00.610824 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:55:00.610853 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610867 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610881 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610896 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610909 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:44Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
I1229 06:55:00.610922 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:46Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610936 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610949 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610964 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.610979 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.687270343Z" level=info msg="ignoring event" container=67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.610995 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.834054505Z" level=info msg="ignoring event" container=82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611010 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154228197Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:55:00.611021 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154272599Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:55:00.611037 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154382560Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:55:00.611048 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154394909Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:55:00.611062 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:55:00.611070 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 dockerd[1647]: time="2025-12-29T06:53:25.157393741Z" level=info msg="Processing signal 'terminated'"
I1229 06:55:00.611079 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.159560262Z" level=error msg="Force shutdown daemon"
I1229 06:55:00.611087 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.160035445Z" level=info msg="Daemon shutdown complete"
I1229 06:55:00.611096 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:55:00.611102 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:55:00.611109 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Consumed 2.138s CPU time, 29.7M memory peak.
I1229 06:55:00.611118 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:55:00.611125 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.286623538Z" level=info msg="Starting up"
I1229 06:55:00.611135 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295291170Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:55:00.611146 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295480841Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:55:00.611157 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295496671Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:55:00.611167 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.316635284Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:55:00.611179 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.328807793Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:55:00.611186 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.355375449Z" level=info msg="Loading containers: start."
I1229 06:55:00.611199 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.681285713Z" level=info msg="ignoring event" container=5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611213 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.755492465Z" level=info msg="ignoring event" container=bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611226 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.762530714Z" level=info msg="ignoring event" container=64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611241 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.775670003Z" level=info msg="ignoring event" container=0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611266 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.794654459Z" level=info msg="ignoring event" container=8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611281 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.801655844Z" level=info msg="ignoring event" container=548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611295 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828715029Z" level=info msg="ignoring event" container=ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611310 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828769092Z" level=info msg="ignoring event" container=a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611325 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.845767487Z" level=info msg="ignoring event" container=abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611342 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.864343925Z" level=info msg="ignoring event" container=fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611355 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.865774071Z" level=info msg="ignoring event" container=14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611370 17440 command_runner.go:130] > Dec 29 06:53:45 functional-695625 dockerd[4014]: time="2025-12-29T06:53:45.656598076Z" level=info msg="ignoring event" container=bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611382 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.636734672Z" level=info msg="ignoring event" container=fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.611404 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.811417108Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:55:00.611417 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.854503584Z" level=info msg="Removing stale sandbox" cid=a123d63a8edb isRestore=false sid=bee98e10184c
I1229 06:55:00.611435 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.857444846Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 155f23c2cd353f99747cbbed5071c374427d34acfe358ab2da9489f0ecc6dd58 20989221f5da3e18159e9875a44d6ffa354887adacc49a282cdee70b58f0dd06], retrying...."
I1229 06:55:00.611449 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.873316567Z" level=info msg="Removing stale sandbox" cid=0af491ef7c2f isRestore=false sid=043bbf7592a3
I1229 06:55:00.611464 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.875334227Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 abd499ef79857402bb9465b07e26fb0f75693045ea6a45283c4a1a4b13da7c92], retrying...."
I1229 06:55:00.611476 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.887452986Z" level=info msg="Removing stale sandbox" cid=ad82b94f7629 isRestore=false sid=4ae81a2c92d8
I1229 06:55:00.611491 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.890633879Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 ccabc3ec6c0d337909f3a6bfccd1999d5ddec500f785c46c7c1173bb9f142a4d], retrying...."
I1229 06:55:00.611502 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.896180450Z" level=info msg="Removing stale sandbox" cid=5024b03252e3 isRestore=false sid=4f7be10df8fc
I1229 06:55:00.611517 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.898438145Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 0e272d35a10e432b889f2a3f6f048225031acf42b0444ba6b0cc9339f3cb374f], retrying...."
I1229 06:55:00.611529 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.903187461Z" level=info msg="Removing stale sandbox" cid=64853b50a6c5 isRestore=false sid=826a3dc204ef
I1229 06:55:00.611544 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.905271147Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 67cd3d4378e987242bd34247eace592097542682b6c3f23a5a478422e9bfbb3b], retrying...."
I1229 06:55:00.611558 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.910152629Z" level=info msg="Removing stale sandbox" cid=548561c7ada8 isRestore=false sid=94281ce70a77
I1229 06:55:00.611574 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.911967707Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 b513626d489ab85e12802c06e57f2ac0b0298434467c73d2846152ca9481eeae], retrying...."
I1229 06:55:00.611586 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.917235829Z" level=info msg="Removing stale sandbox" cid=fe7b5da2f7fb isRestore=false sid=b5e6c523a381
I1229 06:55:00.611601 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.919265802Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 bef3c0f56e910ab0a1a698f2eb08c97229abee2b90bf53ab9119cbdba3cb6eaa], retrying...."
I1229 06:55:00.611617 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022090385Z" level=warning msg="error locating sandbox id 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3: sandbox 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3 not found"
I1229 06:55:00.611631 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022147638Z" level=warning msg="error locating sandbox id 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48: sandbox 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48 not found"
I1229 06:55:00.611645 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022162233Z" level=warning msg="error locating sandbox id 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6: sandbox 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6 not found"
I1229 06:55:00.611660 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022177741Z" level=warning msg="error locating sandbox id 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e: sandbox 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e not found"
I1229 06:55:00.611674 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022193375Z" level=warning msg="error locating sandbox id bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc: sandbox bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc not found"
I1229 06:55:00.611689 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022209936Z" level=warning msg="error locating sandbox id 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2: sandbox 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2 not found"
I1229 06:55:00.611702 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022222477Z" level=warning msg="error locating sandbox id b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0: sandbox b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0 not found"
I1229 06:55:00.611712 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022440032Z" level=info msg="Loading containers: done."
I1229 06:55:00.611722 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037242165Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:55:00.611732 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037335060Z" level=info msg="Initializing buildkit"
I1229 06:55:00.611740 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.057350643Z" level=info msg="Completed buildkit initialization"
I1229 06:55:00.611751 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.066932687Z" level=info msg="Daemon has completed initialization"
I1229 06:55:00.611759 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067089967Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:55:00.611767 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067190842Z" level=info msg="API listen on /run/docker.sock"
I1229 06:55:00.611835 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067284257Z" level=info msg="API listen on [::]:2376"
I1229 06:55:00.611849 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:55:00.611867 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:53:48Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780\""
I1229 06:55:00.611877 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:55:00.611888 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:55:00.611894 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:55:00.611901 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Consumed 1.284s CPU time, 18.5M memory peak.
I1229 06:55:00.611909 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:55:00.611917 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:55:00.611929 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:55:00.611937 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:55:00.611946 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:55:00.611954 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Loaded network plugin cni"
I1229 06:55:00.611963 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:55:00.611971 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:55:00.611981 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:55:00.611990 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:55:00.611999 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:55:00.612006 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:55:00.612019 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612031 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612046 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612063 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612079 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 dockerd[4014]: time="2025-12-29T06:53:50.654005689Z" level=info msg="ignoring event" container=fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612093 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612112 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": unexpected command output nsenter: cannot open /proc/5603/ns/net: No such file or directory\n with error: exit status 1"
I1229 06:55:00.612128 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.781948864Z" level=info msg="ignoring event" container=17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612142 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.786486841Z" level=info msg="ignoring event" container=1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612157 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.807329963Z" level=info msg="ignoring event" container=b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612171 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.833907949Z" level=info msg="ignoring event" container=6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612185 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.842344727Z" level=info msg="ignoring event" container=a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612201 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.846952655Z" level=info msg="ignoring event" container=4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612217 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.855675748Z" level=info msg="ignoring event" container=98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612230 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 dockerd[4014]: time="2025-12-29T06:53:52.089998903Z" level=info msg="ignoring event" container=a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612245 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a96e34d38f8c1eccbbdf73d99dbbbe353acea505d84b69f0fdd4e54cb811123/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612259 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d3819cc8ab802e5145e47325398f1da69b88a241482842040339b6b0d609a176/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612274 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fefef7c5591ea14974a99c19d99f86c4404e25de1b446a0cd0f0bcfffa63a991/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612293 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784\""
I1229 06:55:00.612309 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a7b1e961ded554edec9d882d7f1f6093e8446ab1020c81b638de16b76de139b0/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612323 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.150956960Z" level=error msg="collecting stats for container /k8s_etcd_etcd-functional-695625_kube-system_8da5c6c8980da2ca920a502b6f312384_1: invalid id: id is empty"
I1229 06:55:00.612338 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: 2025/12/29 06:53:58 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request.(*RespWriterWrapper).writeHeader (resp_writer_wrapper.go:83)
I1229 06:55:00.612354 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.741840545Z" level=info msg="ignoring event" container=d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:55:00.612366 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:59Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/173054afc2f39262ebb1466d26d5d6144bb8704054c087da601130a01d9caaf1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:55:00.612380 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: W1229 06:53:59.025412 4884 logging.go:59] [core] [Server #1] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
I1229 06:55:00.612394 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:55:00.612407 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:55:00.629261 17440 logs.go:123] Gathering logs for container status ...
I1229 06:55:00.629293 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1229 06:55:00.671242 17440 command_runner.go:130] > CONTAINER       IMAGE           CREATED               STATE     NAME                      ATTEMPT  POD ID         POD                                          NAMESPACE
I1229 06:55:00.671279 17440 command_runner.go:130] > 4d49952084c92   550794e3b12ac   About a minute ago    Running   kube-scheduler            2        fefef7c5591ea  kube-scheduler-functional-695625             kube-system
I1229 06:55:00.671293 17440 command_runner.go:130] > 6f69ba6a1553a   aa5e3ebc0dfed   About a minute ago    Exited    coredns                   1        a014f32abcd01  coredns-7d764666f9-wfq7m                     kube-system
I1229 06:55:00.671303 17440 command_runner.go:130] > d81259f64136c   0a108f7189562   About a minute ago    Exited    etcd                      1        1fc5fa7d92959  etcd-functional-695625                       kube-system
I1229 06:55:00.671315 17440 command_runner.go:130] > fb6db97d8ffe4   5c6acd67e9cd1   About a minute ago    Exited    kube-apiserver            1        4ed2797334771  kube-apiserver-functional-695625             kube-system
I1229 06:55:00.671327 17440 command_runner.go:130] > 17fe16a2822a8   2c9a4b058bd7e   About a minute ago    Exited    kube-controller-manager   1        98261fa185f6e  kube-controller-manager-functional-695625    kube-system
I1229 06:55:00.671337 17440 command_runner.go:130] > a79d99ad3fde3   550794e3b12ac   About a minute ago    Exited    kube-scheduler            1        b046056ff071b  kube-scheduler-functional-695625             kube-system
I1229 06:55:00.671347 17440 command_runner.go:130] > bd96b57aa9fce   6e38f40d628db   2 minutes ago         Exited    storage-provisioner       0        64853b50a6c5e  storage-provisioner                          kube-system
I1229 06:55:00.671362 17440 command_runner.go:130] > 8911777281f41   32652ff1bbe6b   2 minutes ago         Exited    kube-proxy                0        548561c7ada8f  kube-proxy-g7lp9                             kube-system
I1229 06:55:00.673604 17440 logs.go:123] Gathering logs for kubelet ...
I1229 06:55:00.673628 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1229 06:55:00.695836 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.077121 2634 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
I1229 06:55:00.695863 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.077418 2634 reconciler.go:29] "Reconciler: start to sync state"
I1229 06:55:00.695877 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.077955 2634 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory
I1229 06:55:00.695887 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.109084 2634 factory.go:223] Registration of the containerd container factory successfully
I1229 06:55:00.695901 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.135073 2634 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4"
I1229 06:55:00.695910 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.137245 2634 status_manager.go:249] "Starting to sync pod status with apiserver"
I1229 06:55:00.695920 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.137294 2634 kubelet.go:2501] "Starting kubelet main sync loop"
I1229 06:55:00.695934 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.137340 2634 kubelet.go:2525] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
I1229 06:55:00.695942 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.209773 2634 cpu_manager.go:225] "Starting" policy="none"
I1229 06:55:00.695952 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.209976 2634 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
I1229 06:55:00.695962 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210050 2634 state_mem.go:41] "Initialized" logger="CPUManager state checkpoint.CPUManager state memory"
I1229 06:55:00.695975 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210361 2634 state_mem.go:94] "Updated default CPUSet" logger="CPUManager state checkpoint.CPUManager state memory" cpuSet=""
I1229 06:55:00.696001 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210374 2634 state_mem.go:102] "Updated CPUSet assignments" logger="CPUManager state checkpoint.CPUManager state memory" assignments={}
I1229 06:55:00.696011 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210392 2634 policy_none.go:50] "Start"
I1229 06:55:00.696020 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210408 2634 memory_manager.go:187] "Starting memorymanager" policy="None"
I1229 06:55:00.696029 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210421 2634 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint"
I1229 06:55:00.696038 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210527 2634 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint"
I1229 06:55:00.696045 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.210534 2634 policy_none.go:44] "Start"
I1229 06:55:00.696056 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.219245 2634 manager.go:525] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
I1229 06:55:00.696067 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.220437 2634 eviction_manager.go:194] "Eviction manager: starting control loop"
I1229 06:55:00.696078 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.220456 2634 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
I1229 06:55:00.696089 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.221071 2634 plugin_manager.go:121] "Starting Kubelet Plugin Manager"
I1229 06:55:00.696114 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.226221 2634 eviction_manager.go:272] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime"
I1229 06:55:00.696126 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.239387 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696144 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.239974 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.696155 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.240381 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696165 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.262510 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696185 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283041 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-kubeconfig\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696208 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283087 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-usr-share-ca-certificates\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696228 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283118 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/ebc0dd45a3bf1e20d1e524935fd6129c-kubeconfig\") pod \"kube-scheduler-functional-695625\" (UID: \"ebc0dd45a3bf1e20d1e524935fd6129c\") " pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.696247 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283135 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-certs\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696268 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283151 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-k8s-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696288 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283163 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-ca-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696309 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283175 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-flexvolume-dir\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696329 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283189 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-k8s-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696357 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283209 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-data\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696378 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283223 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-ca-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696400 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.283249 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-usr-share-ca-certificates\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696416 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.285713 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-controller-manager-functional-695625\" already exists" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696428 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.290012 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-scheduler-functional-695625\" already exists" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.696442 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.290269 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"etcd-functional-695625\" already exists" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696454 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: E1229 06:52:41.304300 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-apiserver-functional-695625\" already exists" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696466 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.336817 2634 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:55:00.696475 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.351321 2634 kubelet_node_status.go:123] "Node was previously registered" node="functional-695625"
I1229 06:55:00.696486 17440 command_runner.go:130] > Dec 29 06:52:41 functional-695625 kubelet[2634]: I1229 06:52:41.351415 2634 kubelet_node_status.go:77] "Successfully registered node" node="functional-695625"
I1229 06:55:00.696493 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.033797 2634 apiserver.go:52] "Watching apiserver"
I1229 06:55:00.696503 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.077546 2634 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
I1229 06:55:00.696527 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.181689 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/etcd-functional-695625" podStartSLOduration=3.181660018 podStartE2EDuration="3.181660018s" podCreationTimestamp="2025-12-29 06:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:42.180947341 +0000 UTC m=+1.223544146" watchObservedRunningTime="2025-12-29 06:52:42.181660018 +0000 UTC m=+1.224256834"
I1229 06:55:00.696555 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.221952 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-controller-manager-functional-695625" podStartSLOduration=3.221936027 podStartE2EDuration="3.221936027s" podCreationTimestamp="2025-12-29 06:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:42.202120755 +0000 UTC m=+1.244717560" watchObservedRunningTime="2025-12-29 06:52:42.221936027 +0000 UTC m=+1.264532905"
I1229 06:55:00.696583 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.238774 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-apiserver-functional-695625" podStartSLOduration=3.238759924 podStartE2EDuration="3.238759924s" podCreationTimestamp="2025-12-29 06:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:42.238698819 +0000 UTC m=+1.281295638" watchObservedRunningTime="2025-12-29 06:52:42.238759924 +0000 UTC m=+1.281356744"
I1229 06:55:00.696609 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.238905 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-scheduler-functional-695625" podStartSLOduration=3.238868136 podStartE2EDuration="3.238868136s" podCreationTimestamp="2025-12-29 06:52:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:42.224445467 +0000 UTC m=+1.267042290" watchObservedRunningTime="2025-12-29 06:52:42.238868136 +0000 UTC m=+1.281464962"
I1229 06:55:00.696622 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.266475 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696634 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.266615 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696651 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.266971 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696664 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: I1229 06:52:42.267487 2634 kubelet.go:3340] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.696678 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.287234 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-controller-manager-functional-695625\" already exists" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.696690 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.287316 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.696704 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.292837 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-apiserver-functional-695625\" already exists" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.696718 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.293863 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.696730 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.293764 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"kube-scheduler-functional-695625\" already exists" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.696745 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.294163 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.696757 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.298557 2634 kubelet.go:3342] "Failed creating a mirror pod" err="pods \"etcd-functional-695625\" already exists" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.696770 17440 command_runner.go:130] > Dec 29 06:52:42 functional-695625 kubelet[2634]: E1229 06:52:42.298633 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.696782 17440 command_runner.go:130] > Dec 29 06:52:43 functional-695625 kubelet[2634]: E1229 06:52:43.272537 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.696807 17440 command_runner.go:130] > Dec 29 06:52:43 functional-695625 kubelet[2634]: E1229 06:52:43.273148 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.696835 17440 command_runner.go:130] > Dec 29 06:52:43 functional-695625 kubelet[2634]: E1229 06:52:43.273501 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.696850 17440 command_runner.go:130] > Dec 29 06:52:43 functional-695625 kubelet[2634]: E1229 06:52:43.273627 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.696863 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 kubelet[2634]: E1229 06:52:44.279056 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.696877 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 kubelet[2634]: E1229 06:52:44.279353 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.696887 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 kubelet[2634]: I1229 06:52:44.754123 2634 kuberuntime_manager.go:2062] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
I1229 06:55:00.696899 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 kubelet[2634]: I1229 06:52:44.756083 2634 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
I1229 06:55:00.696917 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: I1229 06:52:45.407560 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94mg5\" (UniqueName: \"kubernetes.io/projected/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-kube-api-access-94mg5\") pod \"kube-proxy-g7lp9\" (UID: \"9c2c2ac1-7fa0-427d-b78e-ee14e169895a\") " pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.696938 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: I1229 06:52:45.408503 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-kube-proxy\") pod \"kube-proxy-g7lp9\" (UID: \"9c2c2ac1-7fa0-427d-b78e-ee14e169895a\") " pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.696958 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: I1229 06:52:45.408957 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-xtables-lock\") pod \"kube-proxy-g7lp9\" (UID: \"9c2c2ac1-7fa0-427d-b78e-ee14e169895a\") " pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.696976 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: I1229 06:52:45.409131 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-lib-modules\") pod \"kube-proxy-g7lp9\" (UID: \"9c2c2ac1-7fa0-427d-b78e-ee14e169895a\") " pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.696991 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: E1229 06:52:45.528153 2634 projected.go:291] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found
I1229 06:55:00.697004 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: E1229 06:52:45.528186 2634 projected.go:196] Error preparing data for projected volume kube-api-access-94mg5 for pod kube-system/kube-proxy-g7lp9: configmap "kube-root-ca.crt" not found
I1229 06:55:00.697032 17440 command_runner.go:130] > Dec 29 06:52:45 functional-695625 kubelet[2634]: E1229 06:52:45.528293 2634 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-kube-api-access-94mg5 podName:9c2c2ac1-7fa0-427d-b78e-ee14e169895a nodeName:}" failed. No retries permitted until 2025-12-29 06:52:46.028266861 +0000 UTC m=+5.070863673 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-94mg5" (UniqueName: "kubernetes.io/projected/9c2c2ac1-7fa0-427d-b78e-ee14e169895a-kube-api-access-94mg5") pod "kube-proxy-g7lp9" (UID: "9c2c2ac1-7fa0-427d-b78e-ee14e169895a") : configmap "kube-root-ca.crt" not found
I1229 06:55:00.697044 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.406131 2634 kubelet_node_status.go:427] "Fast updating node status as it just became ready"
I1229 06:55:00.697064 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519501 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b64sn\" (UniqueName: \"kubernetes.io/projected/00a95e37-1394-45a7-a376-b195e31e3e9c-kube-api-access-b64sn\") pod \"coredns-7d764666f9-wfq7m\" (UID: \"00a95e37-1394-45a7-a376-b195e31e3e9c\") " pod="kube-system/coredns-7d764666f9-wfq7m"
I1229 06:55:00.697084 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519550 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/00a95e37-1394-45a7-a376-b195e31e3e9c-config-volume\") pod \"coredns-7d764666f9-wfq7m\" (UID: \"00a95e37-1394-45a7-a376-b195e31e3e9c\") " pod="kube-system/coredns-7d764666f9-wfq7m"
I1229 06:55:00.697104 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519571 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") pod \"coredns-7d764666f9-9mrnn\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") " pod="kube-system/coredns-7d764666f9-9mrnn"
I1229 06:55:00.697124 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519587 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") pod \"coredns-7d764666f9-9mrnn\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") " pod="kube-system/coredns-7d764666f9-9mrnn"
I1229 06:55:00.697138 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.411642 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605"
I1229 06:55:00.697151 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.545186 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:55:00.697170 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.731196 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/b5f201ca-6d54-4e15-9584-396fb1486f3c-tmp\") pod \"storage-provisioner\" (UID: \"b5f201ca-6d54-4e15-9584-396fb1486f3c\") " pod="kube-system/storage-provisioner"
I1229 06:55:00.697192 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.731252 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghc5d\" (UniqueName: \"kubernetes.io/projected/b5f201ca-6d54-4e15-9584-396fb1486f3c-kube-api-access-ghc5d\") pod \"storage-provisioner\" (UID: \"b5f201ca-6d54-4e15-9584-396fb1486f3c\") " pod="kube-system/storage-provisioner"
I1229 06:55:00.697206 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: E1229 06:52:48.628275 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.697229 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.634714 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/coredns-7d764666f9-9mrnn" podStartSLOduration=2.634698273 podStartE2EDuration="2.634698273s" podCreationTimestamp="2025-12-29 06:52:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.631484207 +0000 UTC m=+7.674081027" watchObservedRunningTime="2025-12-29 06:52:48.634698273 +0000 UTC m=+7.677295093"
I1229 06:55:00.697245 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: E1229 06:52:48.649761 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.697268 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.694857 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/coredns-7d764666f9-wfq7m" podStartSLOduration=2.694842541 podStartE2EDuration="2.694842541s" podCreationTimestamp="2025-12-29 06:52:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.672691157 +0000 UTC m=+7.715287974" watchObservedRunningTime="2025-12-29 06:52:48.694842541 +0000 UTC m=+7.737439360"
I1229 06:55:00.697296 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.728097 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.728082592 podStartE2EDuration="1.728082592s" podCreationTimestamp="2025-12-29 06:52:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.696376688 +0000 UTC m=+7.738973499" watchObservedRunningTime="2025-12-29 06:52:48.728082592 +0000 UTC m=+7.770679413"
I1229 06:55:00.697310 17440 command_runner.go:130] > Dec 29 06:52:49 functional-695625 kubelet[2634]: E1229 06:52:49.674249 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.697322 17440 command_runner.go:130] > Dec 29 06:52:50 functional-695625 kubelet[2634]: E1229 06:52:50.680852 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.697336 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.223368 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.697361 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: I1229 06:52:52.243928 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-proxy-g7lp9" podStartSLOduration=7.243911092 podStartE2EDuration="7.243911092s" podCreationTimestamp="2025-12-29 06:52:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.744380777 +0000 UTC m=+7.786977597" watchObservedRunningTime="2025-12-29 06:52:52.243911092 +0000 UTC m=+11.286507895"
I1229 06:55:00.697376 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.396096 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.697388 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.693687 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.697402 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: E1229 06:52:53.390926 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.697420 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979173 2634 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") pod \"c4313c5f-3b86-48de-8f3c-02d7e007542a\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") "
I1229 06:55:00.697442 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979225 2634 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") pod \"c4313c5f-3b86-48de-8f3c-02d7e007542a\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") "
I1229 06:55:00.697463 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979732 2634 operation_generator.go:779] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume" pod "c4313c5f-3b86-48de-8f3c-02d7e007542a" (UID: "c4313c5f-3b86-48de-8f3c-02d7e007542a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
I1229 06:55:00.697483 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.981248 2634 operation_generator.go:779] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj" pod "c4313c5f-3b86-48de-8f3c-02d7e007542a" (UID: "c4313c5f-3b86-48de-8f3c-02d7e007542a"). InnerVolumeSpecName "kube-api-access-lc5xj". PluginName "kubernetes.io/projected", VolumeGIDValue ""
I1229 06:55:00.697499 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.079447 2634 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") on node \"functional-695625\" DevicePath \"\""
I1229 06:55:00.697515 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.079521 2634 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") on node \"functional-695625\" DevicePath \"\""
I1229 06:55:00.697526 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.715729 2634 scope.go:122] "RemoveContainer" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:55:00.697536 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.756456 2634 scope.go:122] "RemoveContainer" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:55:00.697554 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: E1229 06:52:54.758451 2634 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:55:00.697576 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.758508 2634 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"} err="failed to get container status \"67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f\": rpc error: code = Unknown desc = Error response from daemon: No such container: 67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:55:00.697591 17440 command_runner.go:130] > Dec 29 06:52:55 functional-695625 kubelet[2634]: I1229 06:52:55.144582 2634 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="c4313c5f-3b86-48de-8f3c-02d7e007542a" path="/var/lib/kubelet/pods/c4313c5f-3b86-48de-8f3c-02d7e007542a/volumes"
I1229 06:55:00.697608 17440 command_runner.go:130] > Dec 29 06:52:58 functional-695625 kubelet[2634]: E1229 06:52:58.655985 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.697621 17440 command_runner.go:130] > Dec 29 06:53:20 functional-695625 kubelet[2634]: E1229 06:53:20.683378 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.697637 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913108 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.697651 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913180 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697669 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913193 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697710 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915141 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.697726 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915181 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697746 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915192 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697762 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139490 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.697775 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139600 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697790 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139623 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697815 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139634 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697830 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917175 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.697846 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917271 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697860 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917284 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697876 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918722 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.697892 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918780 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697906 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918792 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697923 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139097 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.697937 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139170 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697951 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139187 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697966 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139214 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.697986 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921730 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698002 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921808 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698029 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921823 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698046 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.923664 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698060 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.924161 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698081 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.924185 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698097 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139396 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.698113 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139458 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698126 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139472 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698141 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139485 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698155 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239338 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698172 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239383 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698187 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244411 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:55:00.698202 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244504 2634 container_log_manager.go:154] "Failed to rotate container logs" err="failed to list containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698218 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458139 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:55:00.698235 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458218 2634 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to list pod stats: failed to get pod or container map: failed to list all containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698274 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926377 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698293 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926435 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698309 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926447 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698325 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.994121 2634 log.go:32] "Version from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698341 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927827 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698362 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927867 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698378 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927930 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698395 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140553 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.698408 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140635 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698424 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140653 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698439 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140664 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698455 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930020 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698469 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930083 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698484 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930129 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698501 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932311 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698514 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932363 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698527 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932375 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698541 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140618 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.698554 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140679 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698577 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140697 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698590 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140709 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698606 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933321 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698620 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933382 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698634 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933393 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698650 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241324 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698666 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241391 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698682 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935649 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698696 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935930 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698711 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935948 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698727 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140389 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.698743 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140507 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698756 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140525 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698769 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140536 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698784 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937258 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698808 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937350 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698823 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937364 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698840 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939069 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698853 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939129 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698868 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939141 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698886 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139354 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:55:00.698903 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139413 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698916 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139428 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698933 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139440 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698948 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941237 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:55:00.698962 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941285 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698976 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941296 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:55:00.698993 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.109014 2634 kubelet.go:2691] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="6.97s"
I1229 06:55:00.699007 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.125762 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:55:00.699018 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.129855 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.699031 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.131487 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.699042 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.140438 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:55:00.699055 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.141703 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.699067 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.164789 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:55:00.699078 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.190793 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:55:00.699093 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202029 2634 kuberuntime_manager.go:1961] "PodSandboxStatus of sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.699105 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202077 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.699119 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202095 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:55:00.699130 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202348 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:55:00.699145 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202382 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.699157 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202394 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:55:00.699180 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202436 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-proxy-g7lp9" podUID="9c2c2ac1-7fa0-427d-b78e-ee14e169895a"
I1229 06:55:00.699195 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202695 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:55:00.699207 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202717 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"}
I1229 06:55:00.699224 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202737 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:55:00.699243 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202753 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/storage-provisioner" podUID="b5f201ca-6d54-4e15-9584-396fb1486f3c"
I1229 06:55:00.699256 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202781 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:55:00.699269 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202797 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.699284 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202829 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.699310 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203153 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:55:00.699330 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203201 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:55:00.699343 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203215 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"}
I1229 06:55:00.699362 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203229 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:55:00.699380 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203240 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:55:00.699407 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.205108 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer"
I1229 06:55:00.699439 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205291 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"ebc0dd45a3bf1e20d1e524935fd6129c\"}"
I1229 06:55:00.699460 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205358 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="ebc0dd45a3bf1e20d1e524935fd6129c"
I1229 06:55:00.699477 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205374 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.699497 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205391 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.699515 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205442 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"5079d003096e0cf8214852718da6832c\"}"
I1229 06:55:00.699533 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205472 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:55:00.699619 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205487 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.699640 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205502 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.699660 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.306369 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:55:00.699683 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.465709 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:55:00.699709 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.727775 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:55:00.699722 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.224724 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:55:00.699738 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.225054 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.699750 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.239349 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:55:00.699763 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.239613 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.699774 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.260924 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:55:00.699785 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.262706 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.699807 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.271403 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:55:00.699820 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.272071 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.699834 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.486082 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.699846 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.527267 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.699861 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.585714 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.699872 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.682419 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.699886 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 kubelet[2634]: E1229 06:53:51.994421 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:55:00.699931 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.009282 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.699946 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.028514 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.699956 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: I1229 06:53:52.059063 2634 scope.go:122] "RemoveContainer" containerID="4b032678478a0db80f17dd1d989d5d3ad03f5c19d261d887ee8bbc80c0ef716c"
I1229 06:55:00.699972 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.061268 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.700008 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.136206 2634 controller.go:251] "Failed to update lease" err="Put \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:55:00.700031 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.348866 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:55:00.700053 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.420977 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:55:00.700067 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.083455 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:55:00.700078 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099631 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:55:00.700091 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099665 2634 scope.go:122] "RemoveContainer" containerID="14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697"
I1229 06:55:00.700102 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.099823 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.700116 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.114949 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:55:00.700129 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115125 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.700139 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.115147 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:55:00.700159 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115570 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:55:00.700168 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128002 2634 scope.go:122] "RemoveContainer" containerID="abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32"
I1229 06:55:00.700179 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128620 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:55:00.700190 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.128846 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.700199 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128862 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:55:00.700217 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.129184 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:55:00.700228 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.146245 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.700240 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.149274 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:55:00.700250 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.158968 2634 scope.go:122] "RemoveContainer" containerID="bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159"
I1229 06:55:00.700268 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.483523 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:55:00.700281 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.165031 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:55:00.700291 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.165425 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:55:00.700310 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.166088 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:55:00.700321 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.177787 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.700331 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.177811 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:55:00.700349 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.178010 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:55:00.700364 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190233 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.700375 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.190259 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:55:00.700394 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190388 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:55:00.700405 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190596 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.700415 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.197650 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:55:00.700427 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198541 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:55:00.700454 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: I1229 06:53:55.198579 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:55:00.700474 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198854 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:55:00.700515 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.114313 2634 kubelet_node_status.go:474] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"containerRuntimeVersion\\\":\\\"docker://28.5.2\\\"}}}\" for node \"functional-695625\": Patch \"https://192.168.39.121:8441/api/v1/nodes/functional-695625/status?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:55:00.700529 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.650698 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:55:00.700539 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.650771 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:55:00.700558 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.651066 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:55:00.700570 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.808551 2634 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:55:00.700578 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
I1229 06:55:00.700584 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
I1229 06:55:00.700590 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
I1229 06:55:00.700597 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Consumed 2.468s CPU time, 33.6M memory peak.
I1229 06:55:00.700603 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 systemd[1]: Started kubelet: The Kubernetes Node Agent.
I1229 06:55:00.700612 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045324 6517 server.go:525] "Kubelet version" kubeletVersion="v1.35.0"
I1229 06:55:00.700620 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045661 6517 server.go:527] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:55:00.700631 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045707 6517 watchdog_linux.go:95] "Systemd watchdog is not enabled"
I1229 06:55:00.700641 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045732 6517 watchdog_linux.go:138] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started."
I1229 06:55:00.700652 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.046147 6517 server.go:951] "Client rotation is on, will bootstrap in background"
I1229 06:55:00.700662 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.047668 6517 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem"
I1229 06:55:00.700674 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.050807 6517 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:55:00.700684 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.062385 6517 server.go:1418] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
I1229 06:55:00.700696 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066238 6517 server.go:775] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /"
I1229 06:55:00.700707 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066279 6517 server.go:836] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false
I1229 06:55:00.700717 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066453 6517 container_manager_linux.go:272] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
I1229 06:55:00.700758 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066538 6517 container_manager_linux.go:277] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"functional-695625","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
I1229 06:55:00.700770 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066683 6517 topology_manager.go:143] "Creating topology manager with none policy"
I1229 06:55:00.700779 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066691 6517 container_manager_linux.go:308] "Creating device plugin manager"
I1229 06:55:00.700790 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066715 6517 container_manager_linux.go:317] "Creating Dynamic Resource Allocation (DRA) manager"
I1229 06:55:00.700816 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066977 6517 state_mem.go:41] "Initialized" logger="CPUManager state memory"
I1229 06:55:00.700831 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067273 6517 kubelet.go:482] "Attempting to sync node with API server"
I1229 06:55:00.700846 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067303 6517 kubelet.go:383] "Adding static pod path" path="/etc/kubernetes/manifests"
I1229 06:55:00.700858 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067321 6517 kubelet.go:394] "Adding apiserver pod source"
I1229 06:55:00.700866 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067339 6517 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
I1229 06:55:00.700879 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.071645 6517 kuberuntime_manager.go:294] "Container runtime initialized" containerRuntime="docker" version="28.5.2" apiVersion="v1"
I1229 06:55:00.700891 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072532 6517 kubelet.go:943] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled"
I1229 06:55:00.700905 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072614 6517 kubelet.go:970] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled"
I1229 06:55:00.700912 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.079617 6517 server.go:1257] "Started kubelet"
I1229 06:55:00.700921 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.087576 6517 fs_resource_analyzer.go:69] "Starting FS ResourceAnalyzer"
I1229 06:55:00.700932 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.098777 6517 scope.go:122] "RemoveContainer" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:55:00.700943 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.106373 6517 server.go:182] "Starting to listen" address="0.0.0.0" port=10250
I1229 06:55:00.700951 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.109848 6517 server.go:317] "Adding debug handlers to kubelet server"
I1229 06:55:00.700963 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117444 6517 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
I1229 06:55:00.700971 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117555 6517 server_v1.go:49] "podresources" method="list" useActivePods=true
I1229 06:55:00.700986 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117716 6517 server.go:254] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
I1229 06:55:00.701000 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.118699 6517 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key"
I1229 06:55:00.701008 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119167 6517 volume_manager.go:311] "Starting Kubelet Volume Manager"
I1229 06:55:00.701020 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.119433 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:55:00.701029 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119972 6517 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
I1229 06:55:00.701037 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.120370 6517 reconciler.go:29] "Reconciler: start to sync state"
I1229 06:55:00.701046 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.130418 6517 factory.go:223] Registration of the systemd container factory successfully
I1229 06:55:00.701061 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.131188 6517 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory
I1229 06:55:00.701073 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.141029 6517 factory.go:223] Registration of the containerd container factory successfully
I1229 06:55:00.701082 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183036 6517 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4"
I1229 06:55:00.701093 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183220 6517 status_manager.go:249] "Starting to sync pod status with apiserver"
I1229 06:55:00.701100 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183330 6517 kubelet.go:2501] "Starting kubelet main sync loop"
I1229 06:55:00.701114 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.183444 6517 kubelet.go:2525] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
I1229 06:55:00.701124 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.221428 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:55:00.701143 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.242700 6517 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:55:00.701160 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.243294 6517 image_gc_manager.go:222] "Failed to monitor images" err="get container status: runtime container status: rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:55:00.701170 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269160 6517 cpu_manager.go:225] "Starting" policy="none"
I1229 06:55:00.701178 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269189 6517 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
I1229 06:55:00.701188 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269211 6517 state_mem.go:41] "Initialized" logger="CPUManager state checkpoint.CPUManager state memory"
I1229 06:55:00.701201 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269368 6517 state_mem.go:94] "Updated default CPUSet" logger="CPUManager state checkpoint.CPUManager state memory" cpuSet=""
I1229 06:55:00.701210 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269407 6517 state_mem.go:102] "Updated CPUSet assignments" logger="CPUManager state checkpoint.CPUManager state memory" assignments={}
I1229 06:55:00.701218 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269429 6517 policy_none.go:50] "Start"
I1229 06:55:00.701226 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269440 6517 memory_manager.go:187] "Starting memorymanager" policy="None"
I1229 06:55:00.701237 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269450 6517 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint"
I1229 06:55:00.701246 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269563 6517 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint"
I1229 06:55:00.701256 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.270193 6517 policy_none.go:44] "Start"
I1229 06:55:00.701266 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.280697 6517 manager.go:525] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
I1229 06:55:00.701277 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282209 6517 eviction_manager.go:194] "Eviction manager: starting control loop"
I1229 06:55:00.701287 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282399 6517 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
I1229 06:55:00.701297 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282694 6517 plugin_manager.go:121] "Starting Kubelet Plugin Manager"
I1229 06:55:00.701308 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.285700 6517 eviction_manager.go:272] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime"
I1229 06:55:00.701322 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.286000 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701334 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.290189 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:55:00.701348 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.296210 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:55:00.701361 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296213 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:55:00.701372 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296423 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:55:00.701385 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296509 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:55:00.701399 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296522 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:55:00.701410 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296659 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:55:00.701422 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296736 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:55:00.701433 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.298291 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:55:00.701447 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.300783 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:55:00.701458 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.307864 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:55:00.701471 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327004 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:55:00.701483 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327039 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:55:00.701496 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.337430 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:55:00.701508 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338584 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:55:00.701521 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338603 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:55:00.701533 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.339318 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:55:00.701550 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.384315 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:55:00.701567 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.121079 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="200ms"
I1229 06:55:00.701581 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.286789 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701592 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.288099 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701611 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.322920 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="400ms"
I1229 06:55:00.701625 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.289381 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701642 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.724518 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="800ms"
I1229 06:55:00.701678 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.088119 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bc22bb49a default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,LastTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:55:00.701695 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.387607 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:55:00.701705 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: I1229 06:54:34.589687 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:55:00.701716 17440 command_runner.go:130] > Dec 29 06:54:40 functional-695625 kubelet[6517]: E1229 06:54:40.289653 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701735 17440 command_runner.go:130] > Dec 29 06:54:41 functional-695625 kubelet[6517]: E1229 06:54:41.525961 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="1.6s"
I1229 06:55:00.701749 17440 command_runner.go:130] > Dec 29 06:54:50 functional-695625 kubelet[6517]: E1229 06:54:50.290623 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.701764 17440 command_runner.go:130] > Dec 29 06:54:53 functional-695625 kubelet[6517]: E1229 06:54:53.127043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="3.2s"
I1229 06:55:00.701780 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.123055 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:55:00.701807 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223407 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-ca-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.701827 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223452 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-usr-share-ca-certificates\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.701847 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224254 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-flexvolume-dir\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.701867 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224286 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-k8s-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.701886 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224307 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/ebc0dd45a3bf1e20d1e524935fd6129c-kubeconfig\") pod \"kube-scheduler-functional-695625\" (UID: \"ebc0dd45a3bf1e20d1e524935fd6129c\") " pod="kube-system/kube-scheduler-functional-695625"
I1229 06:55:00.701907 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224328 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-k8s-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:55:00.701928 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224346 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-ca-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.701948 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224360 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-kubeconfig\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.701971 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224377 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-usr-share-ca-certificates\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:55:00.701995 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224432 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-certs\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:55:00.702014 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224449 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-data\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:55:00.702027 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.291332 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:55:00.755255 17440 logs.go:123] Gathering logs for dmesg ...
I1229 06:55:00.755293 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1229 06:55:00.771031 17440 command_runner.go:130] > [Dec29 06:52] Booted with the nomodeset parameter. Only the system framebuffer will be available
I1229 06:55:00.771066 17440 command_runner.go:130] > [ +0.000007] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
I1229 06:55:00.771079 17440 command_runner.go:130] > [ +0.000088] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
I1229 06:55:00.771088 17440 command_runner.go:130] > [ +0.005634] (rpcbind)[119]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
I1229 06:55:00.771097 17440 command_runner.go:130] > [ +0.202886] crun[405]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set
I1229 06:55:00.771103 17440 command_runner.go:130] > [ +0.971059] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
I1229 06:55:00.771109 17440 command_runner.go:130] > [ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
I1229 06:55:00.771116 17440 command_runner.go:130] > [ +0.000001] NFSD: Unable to initialize client recovery tracking! (-2)
I1229 06:55:00.771121 17440 command_runner.go:130] > [ +0.268875] kauditd_printk_skb: 32 callbacks suppressed
I1229 06:55:00.771126 17440 command_runner.go:130] > [ +0.123569] kauditd_printk_skb: 342 callbacks suppressed
I1229 06:55:00.771131 17440 command_runner.go:130] > [ +0.099711] kauditd_printk_skb: 205 callbacks suppressed
I1229 06:55:00.771136 17440 command_runner.go:130] > [ +0.170782] kauditd_printk_skb: 221 callbacks suppressed
I1229 06:55:00.771143 17440 command_runner.go:130] > [ +0.199839] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:55:00.771153 17440 command_runner.go:130] > [ +0.025660] kauditd_printk_skb: 318 callbacks suppressed
I1229 06:55:00.771158 17440 command_runner.go:130] > [Dec29 06:53] kauditd_printk_skb: 19 callbacks suppressed
I1229 06:55:00.771165 17440 command_runner.go:130] > [ +15.204939] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:55:00.771175 17440 command_runner.go:130] > [ +0.333829] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:55:00.771185 17440 command_runner.go:130] > [ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:55:00.771191 17440 command_runner.go:130] > [ +4.976278] kauditd_printk_skb: 42 callbacks suppressed
I1229 06:55:00.771196 17440 command_runner.go:130] > [ +0.830497] kauditd_printk_skb: 396 callbacks suppressed
I1229 06:55:00.771202 17440 command_runner.go:130] > [ +5.294312] kauditd_printk_skb: 231 callbacks suppressed
I1229 06:55:00.772218 17440 logs.go:123] Gathering logs for describe nodes ...
I1229 06:55:00.772246 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1229 06:56:00.863293 17440 command_runner.go:130] ! Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
I1229 06:56:00.863340 17440 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (1m0.091082059s)
W1229 06:56:00.863385 17440 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
output:
** stderr **
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
** /stderr **
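
The describe-nodes probe above blocks for the apiserver's full server-side timeout (about a minute) before failing. The Go sketch below is illustrative only, not minikube's ssh_runner; it runs the same kubectl command but with a caller-side deadline so a stalled apiserver cannot hold up diagnostics for that long. The binary and kubeconfig paths are copied from the log; everything else is an assumption.

// describenodes.go - illustrative sketch, not minikube code.
// Runs the same "kubectl describe nodes" probe as the log above,
// but bounds it with a caller-side timeout via exec.CommandContext.
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	// Assumed caller-side deadline; the log's probe relied on the server timeout instead.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Paths match the log output; adjust for a local environment.
	cmd := exec.CommandContext(ctx,
		"sudo", "/var/lib/minikube/binaries/v1.35.0/kubectl",
		"describe", "nodes",
		"--kubeconfig=/var/lib/minikube/kubeconfig")
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println("describe nodes failed:", err) // e.g. exit status 1 or context deadline exceeded
	}
	fmt.Println(string(out))
}
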
I1229 06:56:00.863402 17440 logs.go:123] Gathering logs for kube-apiserver [fb6db97d8ffe] ...
I1229 06:56:00.863420 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 fb6db97d8ffe"
I1229 06:56:00.897112 17440 command_runner.go:130] ! I1229 06:53:50.588377 1 options.go:263] external host was not specified, using 192.168.39.121
I1229 06:56:00.897142 17440 command_runner.go:130] ! I1229 06:53:50.597275 1 server.go:150] Version: v1.35.0
I1229 06:56:00.897153 17440 command_runner.go:130] ! I1229 06:53:50.597323 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:00.897164 17440 command_runner.go:130] ! E1229 06:53:50.606724 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
W1229 06:56:00.898716 17440 logs.go:138] Found kube-apiserver [fb6db97d8ffe] problem: E1229 06:53:50.606724 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
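
The problem flagged here is an ordinary TCP bind conflict: some process (most likely an earlier kube-apiserver instance) is still holding 0.0.0.0:8441 when the new apiserver starts. The Go sketch below is illustrative only and is not minikube or apiserver code; it reproduces the same "bind: address already in use" error by opening the port twice in one process. The port number is taken from the log.

// bindconflict.go - illustrative reproduction of the error reported above.
package main

import (
	"fmt"
	"net"
)

func main() {
	// First listener succeeds and holds 0.0.0.0:8441 (the apiserver port in this log).
	first, err := net.Listen("tcp", "0.0.0.0:8441")
	if err != nil {
		fmt.Println("first listen failed:", err)
		return
	}
	defer first.Close()

	// Second listener fails with "bind: address already in use",
	// the same class of error the restarted kube-apiserver hit.
	_, err = net.Listen("tcp", "0.0.0.0:8441")
	fmt.Println("second listen error:", err)
}
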
I1229 06:56:00.898738 17440 logs.go:123] Gathering logs for etcd [d81259f64136] ...
I1229 06:56:00.898750 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 d81259f64136"
I1229 06:56:00.935530 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517725Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:00.938590 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517828Z","caller":"etcdmain/config.go:270","msg":"--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7."}
I1229 06:56:00.938653 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.517848Z","caller":"etcdmain/etcd.go:64","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.39.121:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--feature-gates=InitialCorruptCheck=true","--initial-advertise-peer-urls=https://192.168.39.121:2380","--initial-cluster=functional-695625=https://192.168.39.121:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.39.121:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.39.121:2380","--name=functional-695625","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--snapshot-count=10000","--trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--watch-progress-notify-interval=5s"]}
I1229 06:56:00.938666 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519323Z","caller":"etcdmain/etcd.go:107","msg":"server has already been initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
I1229 06:56:00.938679 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.519372Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:00.938689 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519700Z","caller":"embed/etcd.go:138","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.39.121:2380"]}
I1229 06:56:00.938712 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.522332Z","caller":"embed/etcd.go:544","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:00.938728 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.530852Z","caller":"embed/etcd.go:146","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"]}
I1229 06:56:00.938838 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.531312Z","caller":"embed/etcd.go:323","msg":"starting an etcd server","etcd-version":"3.6.6","git-sha":"d2809cf","go-version":"go1.24.10","go-os":"linux","go-arch":"amd64","max-cpu-set":2,"max-cpu-available":2,"member-initialized":true,"name":"functional-695625","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"experimental-local-address":"","cors":["*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"feature-gates":"InitialCorruptCheck=true","initial-corrupt-check":false,"corrupt-check-time-interval":"0s","compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","discovery-token":"","discovery-endpoints":"","discovery-dial-timeout":"2s","discovery-request-timeout":"5s","discovery-keepalive-time":"2s","discovery-keepalive-timeout":"6s","discovery-insecure-transport":true,"discovery-insecure-skip-tls-verify":false,"discovery-cert":"","discovery-key":"","discovery-cacert":"","discovery-user":"","downgrade-check-interval":"5s","max-learners":1,"v2-deprecation":"write-only"}
I1229 06:56:00.938875 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.533505Z","logger":"bbolt","caller":"backend/backend.go:203","msg":"Opening db file (/var/lib/minikube/etcd/member/snap/db) with mode -rw------- and with options: {Timeout: 0s, NoGrowSync: false, NoFreelistSync: true, PreLoadFreelist: false, FreelistType: hashmap, ReadOnly: false, MmapFlags: 8000, InitialMmapSize: 10737418240, PageSize: 0, NoSync: false, OpenFile: 0x0, Mlock: false, Logger: 0xc00006a930}"}
I1229 06:56:00.938892 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.562961Z","logger":"bbolt","caller":"bbolt@v1.4.3/db.go:321","msg":"Opening bbolt db (/var/lib/minikube/etcd/member/snap/db) successfully"}
I1229 06:56:00.938902 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.566967Z","caller":"storage/backend.go:80","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"33.344174ms"}
I1229 06:56:00.938913 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.569353Z","caller":"etcdserver/bootstrap.go:220","msg":"restore consistentIndex","index":497}
I1229 06:56:00.938922 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596637Z","caller":"etcdserver/bootstrap.go:441","msg":"No snapshot found. Recovering WAL from scratch!"}
I1229 06:56:00.938935 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596694Z","caller":"etcdserver/bootstrap.go:232","msg":"recovered v3 backend","backend-size-bytes":1019904,"backend-size":"1.0 MB","backend-size-in-use-bytes":999424,"backend-size-in-use":"999 kB"}
I1229 06:56:00.938946 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596795Z","caller":"etcdserver/bootstrap.go:90","msg":"Bootstrapping WAL from snapshot"}
I1229 06:56:00.938958 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.620855Z","caller":"etcdserver/bootstrap.go:599","msg":"restarting local member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","commit-index":497}
I1229 06:56:00.938969 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.621587Z","caller":"etcdserver/bootstrap.go:94","msg":"bootstrapping cluster"}
I1229 06:56:00.938978 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624518Z","caller":"etcdserver/bootstrap.go:101","msg":"bootstrapping storage"}
I1229 06:56:00.938993 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624664Z","caller":"membership/cluster.go:605","msg":"Detected member only in v3store but missing in v2store","member":"{ID:cbdf275f553df7c2 RaftAttributes:{PeerURLs:[https://192.168.39.121:2380] IsLearner:false} Attributes:{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}}"}
I1229 06:56:00.939003 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624700Z","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.6"}
I1229 06:56:00.939022 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624712Z","caller":"membership/cluster.go:297","msg":"recovered/added member from store","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","recovered-remote-peer-id":"cbdf275f553df7c2","recovered-remote-peer-urls":["https://192.168.39.121:2380"],"recovered-remote-peer-is-learner":false}
I1229 06:56:00.939035 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624720Z","caller":"membership/cluster.go:307","msg":"set cluster version from store","cluster-version":"3.6"}
I1229 06:56:00.939046 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624728Z","caller":"etcdserver/bootstrap.go:109","msg":"bootstrapping raft"}
I1229 06:56:00.939053 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624764Z","caller":"etcdserver/server.go:312","msg":"bootstrap successfully"}
I1229 06:56:00.939062 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624799Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=()"}
I1229 06:56:00.939071 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624856Z","logger":"raft","caller":"v3@v3.6.0/raft.go:897","msg":"cbdf275f553df7c2 became follower at term 2"}
I1229 06:56:00.939081 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624867Z","logger":"raft","caller":"v3@v3.6.0/raft.go:493","msg":"newRaft cbdf275f553df7c2 [peers: [], term: 2, commit: 497, applied: 0, lastindex: 497, lastterm: 2]"}
I1229 06:56:00.939091 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.634002Z","caller":"auth/store.go:1135","msg":"simple token is not cryptographically signed"}
I1229 06:56:00.939111 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.644772Z","caller":"mvcc/kvstore.go:408","msg":"kvstore restored","current-rev":465}
I1229 06:56:00.939126 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.681530Z","caller":"storage/quota.go:93","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
I1229 06:56:00.939142 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686046Z","caller":"etcdserver/corrupt.go:91","msg":"starting initial corruption check","local-member-id":"cbdf275f553df7c2","timeout":"7s"}
I1229 06:56:00.939162 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686350Z","caller":"etcdserver/corrupt.go:172","msg":"initial corruption checking passed; no corruption","local-member-id":"cbdf275f553df7c2"}
I1229 06:56:00.939181 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686391Z","caller":"etcdserver/server.go:589","msg":"starting etcd server","local-member-id":"cbdf275f553df7c2","local-server-version":"3.6.6","cluster-id":"6f38b6947d3f1f22","cluster-version":"3.6"}
I1229 06:56:00.939213 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687141Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:00.939249 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687399Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cbdf275f553df7c2","initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
I1229 06:56:00.939258 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687425Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
I1229 06:56:00.939274 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687475Z","caller":"etcdserver/server.go:483","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"cbdf275f553df7c2","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
I1229 06:56:00.939289 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687536Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
I1229 06:56:00.939302 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687564Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
I1229 06:56:00.939324 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687571Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
I1229 06:56:00.939342 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687702Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:00.939352 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687713Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.121:2380"}
I1229 06:56:00.939362 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692750Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=(14690503799911348162)"}
I1229 06:56:00.939377 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692847Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","added-peer-id":"cbdf275f553df7c2","added-peer-peer-urls":["https://192.168.39.121:2380"],"added-peer-is-learner":false}
I1229 06:56:00.939389 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.694703Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","from":"3.6","to":"3.6"}
I1229 06:56:00.939404 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830725Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cbdf275f553df7c2 is starting a new election at term 2"}
I1229 06:56:00.939423 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830765Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cbdf275f553df7c2 became pre-candidate at term 2"}
I1229 06:56:00.939439 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830844Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgPreVoteResp from cbdf275f553df7c2 at term 2"}
I1229 06:56:00.939458 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830858Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
I1229 06:56:00.939467 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830936Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cbdf275f553df7c2 became candidate at term 3"}
I1229 06:56:00.939478 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832101Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgVoteResp from cbdf275f553df7c2 at term 3"}
I1229 06:56:00.939494 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832130Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgVoteResp votes and 0 vote rejections"}
I1229 06:56:00.939513 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832148Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cbdf275f553df7c2 became leader at term 3"}
I1229 06:56:00.939528 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832166Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cbdf275f553df7c2 elected leader cbdf275f553df7c2 at term 3"}
I1229 06:56:00.939544 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835446Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:00.939564 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835384Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cbdf275f553df7c2","local-member-attributes":"{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}","cluster-id":"6f38b6947d3f1f22","publish-timeout":"7s"}
I1229 06:56:00.939586 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839145Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:00.939603 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839733Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:00.939616 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851017Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
I1229 06:56:00.939882 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851067Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
I1229 06:56:00.939915 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851748Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:00.939932 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.856729Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
I1229 06:56:00.939947 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.869216Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.121:2379"}
I1229 06:56:00.939960 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706108Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
I1229 06:56:00.939998 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706269Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:56:00.940030 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:51.706381Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:56:00.940064 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.707655Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:56:00.940122 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.709799Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:00.940150 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.709913Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"cbdf275f553df7c2","current-leader-member-id":"cbdf275f553df7c2"}
I1229 06:56:00.940167 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710255Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
I1229 06:56:00.940187 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710690Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:56:00.940204 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710782Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:56:00.940257 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.710832Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:00.940277 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710742Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
I1229 06:56:00.940301 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711002Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:56:00.940334 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711035Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:56:00.940371 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.711045Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:00.940389 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717452Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:00.940425 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.717551Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:00.940447 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717601Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:00.940473 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717654Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:56:00.955065 17440 out.go:374] Setting ErrFile to fd 2...
I1229 06:56:00.955108 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
W1229 06:56:00.955188 17440 out.go:285] X Problems detected in kube-apiserver [fb6db97d8ffe]:
W1229 06:56:00.955202 17440 out.go:285] E1229 06:53:50.606724 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
I1229 06:56:00.955209 17440 out.go:374] Setting ErrFile to fd 2...
I1229 06:56:00.955215 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1229 06:56:10.957344 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:56:15.961183 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
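
The healthz probe above gives up after a short client timeout rather than waiting indefinitely on the unresponsive apiserver. A rough, stand-alone Go sketch of such a probe is shown below; the endpoint URL comes from the log, while the 5-second timeout and the skipped TLS verification are assumptions made purely for illustration (real tooling should trust the cluster CA instead).

// healthprobe.go - illustrative sketch, not minikube's api_server check.
// Probes the apiserver /healthz endpoint with a short client timeout.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second, // assumed; the log's check also gives up after a few seconds
		Transport: &http.Transport{
			// Verification is skipped for this illustration only; the cluster
			// uses its own CA, which real tooling should pin.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://192.168.39.121:8441/healthz")
	if err != nil {
		fmt.Println("healthz probe failed:", err) // e.g. Client.Timeout exceeded
		return
	}
	defer resp.Body.Close()
	fmt.Println("healthz status:", resp.Status)
}
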
I1229 06:56:15.961319 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
I1229 06:56:15.981705 17440 command_runner.go:130] > 18d0015c724a
I1229 06:56:15.982641 17440 logs.go:282] 1 containers: [18d0015c724a]
I1229 06:56:15.982732 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
I1229 06:56:16.002259 17440 command_runner.go:130] > 6b7711ee25a2
I1229 06:56:16.002292 17440 command_runner.go:130] > d81259f64136
I1229 06:56:16.002322 17440 logs.go:282] 2 containers: [6b7711ee25a2 d81259f64136]
I1229 06:56:16.002399 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
I1229 06:56:16.021992 17440 command_runner.go:130] > 6f69ba6a1553
I1229 06:56:16.022032 17440 logs.go:282] 1 containers: [6f69ba6a1553]
I1229 06:56:16.022113 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
I1229 06:56:16.048104 17440 command_runner.go:130] > 4d49952084c9
I1229 06:56:16.048133 17440 command_runner.go:130] > a79d99ad3fde
I1229 06:56:16.049355 17440 logs.go:282] 2 containers: [4d49952084c9 a79d99ad3fde]
I1229 06:56:16.049441 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
I1229 06:56:16.071523 17440 command_runner.go:130] > 8911777281f4
I1229 06:56:16.072578 17440 logs.go:282] 1 containers: [8911777281f4]
I1229 06:56:16.072668 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
I1229 06:56:16.092921 17440 command_runner.go:130] > f48fc04e3475
I1229 06:56:16.092948 17440 command_runner.go:130] > 17fe16a2822a
I1229 06:56:16.092975 17440 logs.go:282] 2 containers: [f48fc04e3475 17fe16a2822a]
I1229 06:56:16.093047 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
I1229 06:56:16.113949 17440 logs.go:282] 0 containers: []
W1229 06:56:16.113983 17440 logs.go:284] No container was found matching "kindnet"
I1229 06:56:16.114047 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
I1229 06:56:16.135700 17440 command_runner.go:130] > bd96b57aa9fc
I1229 06:56:16.135739 17440 logs.go:282] 1 containers: [bd96b57aa9fc]
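[editor's note] Each "docker ps -a --filter=name=k8s_<component>" call above returns the container IDs that the log-gathering step then tails. A rough local equivalent in Go is sketched below; it is not minikube's ssh_runner/logs.go code (which runs the command over SSH inside the VM), just the same docker invocation wrapped in os/exec.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// containerIDs lists all containers (running or exited) whose name matches
// the kubelet naming convention k8s_<component>, one ID per line.
func containerIDs(component string) ([]string, error) {
	out, err := exec.Command("docker", "ps", "-a",
		"--filter", "name=k8s_"+component,
		"--format", "{{.ID}}").Output()
	if err != nil {
		return nil, err
	}
	return strings.Fields(string(out)), nil
}

func main() {
	// Same component list as the filters in the log above.
	for _, c := range []string{"kube-apiserver", "etcd", "coredns",
		"kube-scheduler", "kube-proxy", "kube-controller-manager",
		"kindnet", "storage-provisioner"} {
		ids, err := containerIDs(c)
		if err != nil {
			fmt.Println(c, "error:", err)
			continue
		}
		fmt.Printf("%d containers: %v (%s)\n", len(ids), ids, c)
	}
}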
I1229 06:56:16.135766 17440 logs.go:123] Gathering logs for dmesg ...
I1229 06:56:16.135786 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1229 06:56:16.152008 17440 command_runner.go:130] > [Dec29 06:52] Booted with the nomodeset parameter. Only the system framebuffer will be available
I1229 06:56:16.152038 17440 command_runner.go:130] > [ +0.000007] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
I1229 06:56:16.152046 17440 command_runner.go:130] > [ +0.000088] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
I1229 06:56:16.152054 17440 command_runner.go:130] > [ +0.005634] (rpcbind)[119]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
I1229 06:56:16.152063 17440 command_runner.go:130] > [ +0.202886] crun[405]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set
I1229 06:56:16.152069 17440 command_runner.go:130] > [ +0.971059] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
I1229 06:56:16.152076 17440 command_runner.go:130] > [ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
I1229 06:56:16.152081 17440 command_runner.go:130] > [ +0.000001] NFSD: Unable to initialize client recovery tracking! (-2)
I1229 06:56:16.152086 17440 command_runner.go:130] > [ +0.268875] kauditd_printk_skb: 32 callbacks suppressed
I1229 06:56:16.152091 17440 command_runner.go:130] > [ +0.123569] kauditd_printk_skb: 342 callbacks suppressed
I1229 06:56:16.152096 17440 command_runner.go:130] > [ +0.099711] kauditd_printk_skb: 205 callbacks suppressed
I1229 06:56:16.152102 17440 command_runner.go:130] > [ +0.170782] kauditd_printk_skb: 221 callbacks suppressed
I1229 06:56:16.152107 17440 command_runner.go:130] > [ +0.199839] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:56:16.152112 17440 command_runner.go:130] > [ +0.025660] kauditd_printk_skb: 318 callbacks suppressed
I1229 06:56:16.152119 17440 command_runner.go:130] > [Dec29 06:53] kauditd_printk_skb: 19 callbacks suppressed
I1229 06:56:16.152128 17440 command_runner.go:130] > [ +15.204939] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:56:16.152148 17440 command_runner.go:130] > [ +0.333829] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:56:16.152164 17440 command_runner.go:130] > [ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:56:16.152180 17440 command_runner.go:130] > [ +4.976278] kauditd_printk_skb: 42 callbacks suppressed
I1229 06:56:16.152190 17440 command_runner.go:130] > [ +0.830497] kauditd_printk_skb: 396 callbacks suppressed
I1229 06:56:16.152201 17440 command_runner.go:130] > [ +5.294312] kauditd_printk_skb: 231 callbacks suppressed
I1229 06:56:16.152209 17440 command_runner.go:130] > [Dec29 06:56] kauditd_printk_skb: 36 callbacks suppressed
I1229 06:56:16.152217 17440 command_runner.go:130] > [ +10.952068] kauditd_printk_skb: 66 callbacks suppressed
I1229 06:56:16.153163 17440 logs.go:123] Gathering logs for kube-proxy [8911777281f4] ...
I1229 06:56:16.153192 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 8911777281f4"
I1229 06:56:16.174824 17440 command_runner.go:130] ! I1229 06:52:47.703648 1 server_linux.go:53] "Using iptables proxy"
I1229 06:56:16.174856 17440 command_runner.go:130] ! I1229 06:52:47.791676 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:56:16.174862 17440 command_runner.go:130] ! I1229 06:52:47.897173 1 shared_informer.go:377] "Caches are synced"
I1229 06:56:16.174873 17440 command_runner.go:130] ! I1229 06:52:47.900073 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.39.121"]
I1229 06:56:16.174892 17440 command_runner.go:130] ! E1229 06:52:47.906310 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1229 06:56:16.174900 17440 command_runner.go:130] ! I1229 06:52:48.206121 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
I1229 06:56:16.174913 17440 command_runner.go:130] ! error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
I1229 06:56:16.174920 17440 command_runner.go:130] ! Perhaps ip6tables or your kernel needs to be upgraded.
I1229 06:56:16.174924 17440 command_runner.go:130] ! >
I1229 06:56:16.174931 17440 command_runner.go:130] ! I1229 06:52:48.209509 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1229 06:56:16.174941 17440 command_runner.go:130] ! I1229 06:52:48.210145 1 server_linux.go:136] "Using iptables Proxier"
I1229 06:56:16.174957 17440 command_runner.go:130] ! I1229 06:52:48.253805 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1229 06:56:16.174966 17440 command_runner.go:130] ! I1229 06:52:48.255046 1 server.go:529] "Version info" version="v1.35.0"
I1229 06:56:16.174975 17440 command_runner.go:130] ! I1229 06:52:48.255076 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:16.174985 17440 command_runner.go:130] ! I1229 06:52:48.262205 1 config.go:200] "Starting service config controller"
I1229 06:56:16.174994 17440 command_runner.go:130] ! I1229 06:52:48.262238 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1229 06:56:16.175003 17440 command_runner.go:130] ! I1229 06:52:48.262258 1 config.go:106] "Starting endpoint slice config controller"
I1229 06:56:16.175012 17440 command_runner.go:130] ! I1229 06:52:48.262261 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1229 06:56:16.175024 17440 command_runner.go:130] ! I1229 06:52:48.262278 1 config.go:403] "Starting serviceCIDR config controller"
I1229 06:56:16.175033 17440 command_runner.go:130] ! I1229 06:52:48.262282 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1229 06:56:16.175040 17440 command_runner.go:130] ! I1229 06:52:48.270608 1 config.go:309] "Starting node config controller"
I1229 06:56:16.175050 17440 command_runner.go:130] ! I1229 06:52:48.271311 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1229 06:56:16.175074 17440 command_runner.go:130] ! I1229 06:52:48.271337 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1229 06:56:16.175325 17440 command_runner.go:130] ! I1229 06:52:48.363324 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1229 06:56:16.175351 17440 command_runner.go:130] ! I1229 06:52:48.363427 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1229 06:56:16.175362 17440 command_runner.go:130] ! I1229 06:52:48.363671 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1229 06:56:16.177120 17440 logs.go:123] Gathering logs for container status ...
I1229 06:56:16.177144 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1229 06:56:16.222627 17440 command_runner.go:130] > CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
I1229 06:56:16.222665 17440 command_runner.go:130] > 18d0015c724a8 5c6acd67e9cd1 5 seconds ago Exited kube-apiserver 3 d3819cc8ab802 kube-apiserver-functional-695625 kube-system
I1229 06:56:16.222684 17440 command_runner.go:130] > f48fc04e34751 2c9a4b058bd7e 16 seconds ago Running kube-controller-manager 2 0a96e34d38f8c kube-controller-manager-functional-695625 kube-system
I1229 06:56:16.222707 17440 command_runner.go:130] > 6b7711ee25a2d 0a108f7189562 16 seconds ago Running etcd 2 173054afc2f39 etcd-functional-695625 kube-system
I1229 06:56:16.222730 17440 command_runner.go:130] > 4d49952084c92 550794e3b12ac 2 minutes ago Running kube-scheduler 2 fefef7c5591ea kube-scheduler-functional-695625 kube-system
I1229 06:56:16.222749 17440 command_runner.go:130] > 6f69ba6a1553a aa5e3ebc0dfed 2 minutes ago Exited coredns 1 a014f32abcd01 coredns-7d764666f9-wfq7m kube-system
I1229 06:56:16.222768 17440 command_runner.go:130] > d81259f64136c 0a108f7189562 2 minutes ago Exited etcd 1 1fc5fa7d92959 etcd-functional-695625 kube-system
I1229 06:56:16.222810 17440 command_runner.go:130] > 17fe16a2822a8 2c9a4b058bd7e 2 minutes ago Exited kube-controller-manager 1 98261fa185f6e kube-controller-manager-functional-695625 kube-system
I1229 06:56:16.222831 17440 command_runner.go:130] > a79d99ad3fde3 550794e3b12ac 2 minutes ago Exited kube-scheduler 1 b046056ff071b kube-scheduler-functional-695625 kube-system
I1229 06:56:16.222851 17440 command_runner.go:130] > bd96b57aa9fce 6e38f40d628db 3 minutes ago Exited storage-provisioner 0 64853b50a6c5e storage-provisioner kube-system
I1229 06:56:16.222879 17440 command_runner.go:130] > 8911777281f41 32652ff1bbe6b 3 minutes ago Exited kube-proxy 0 548561c7ada8f kube-proxy-g7lp9 kube-system
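[editor's note] The "container status" step above uses a shell fallback: prefer crictl when it is installed, otherwise run "docker ps -a". A hypothetical Go rendering of that fallback is shown below; minikube itself executes the shell one-liner over SSH inside the VM, so this is only an illustration of the same idea.

package main

import (
	"fmt"
	"os/exec"
)

// containerStatus tries crictl first and falls back to docker,
// mirroring the `which crictl || echo crictl` one-liner in the log.
func containerStatus() (string, error) {
	if _, err := exec.LookPath("crictl"); err == nil {
		if out, err := exec.Command("sudo", "crictl", "ps", "-a").CombinedOutput(); err == nil {
			return string(out), nil
		}
	}
	out, err := exec.Command("sudo", "docker", "ps", "-a").CombinedOutput()
	return string(out), err
}

func main() {
	out, err := containerStatus()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Print(out)
}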
I1229 06:56:16.225409 17440 logs.go:123] Gathering logs for etcd [6b7711ee25a2] ...
I1229 06:56:16.225439 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 6b7711ee25a2"
I1229 06:56:16.247416 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.924768Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:16.247449 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.925193Z","caller":"etcdmain/config.go:270","msg":"--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7."}
I1229 06:56:16.247516 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925252Z","caller":"etcdmain/etcd.go:64","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.39.121:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--feature-gates=InitialCorruptCheck=true","--initial-advertise-peer-urls=https://192.168.39.121:2380","--initial-cluster=functional-695625=https://192.168.39.121:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.39.121:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.39.121:2380","--name=functional-695625","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--snapshot-count=10000","--trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--watch-progress-notify-interval=5s"]}
I1229 06:56:16.247533 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925487Z","caller":"etcdmain/etcd.go:107","msg":"server has already been initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
I1229 06:56:16.247545 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.925602Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:16.247555 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925710Z","caller":"embed/etcd.go:138","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.39.121:2380"]}
I1229 06:56:16.247582 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925810Z","caller":"embed/etcd.go:544","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:16.247605 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.934471Z","caller":"embed/etcd.go:146","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"]}
I1229 06:56:16.247698 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.935217Z","caller":"embed/etcd.go:323","msg":"starting an etcd server","etcd-version":"3.6.6","git-sha":"d2809cf","go-version":"go1.24.10","go-os":"linux","go-arch":"amd64","max-cpu-set":2,"max-cpu-available":2,"member-initialized":true,"name":"functional-695625","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"experimental-local-address":"","cors":["*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"feature-gates":"InitialCorruptCheck=true","initial-corrupt-check":false,"corrupt-check-time-interval":"0s","compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","discovery-token":"","discovery-endpoints":"","discovery-dial-timeout":"2s","discovery-request-timeout":"5s","discovery-keepalive-time":"2s","discovery-keepalive-timeout":"6s","discovery-insecure-transport":true,"discovery-insecure-skip-tls-verify":false,"discovery-cert":"","discovery-key":"","discovery-cacert":"","discovery-user":"","downgrade-check-interval":"5s","max-learners":1,"v2-deprecation":"write-only"}
I1229 06:56:16.247722 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.937503Z","logger":"bbolt","caller":"backend/backend.go:203","msg":"Opening db file (/var/lib/minikube/etcd/member/snap/db) with mode -rw------- and with options: {Timeout: 0s, NoGrowSync: false, NoFreelistSync: true, PreLoadFreelist: false, FreelistType: hashmap, ReadOnly: false, MmapFlags: 8000, InitialMmapSize: 10737418240, PageSize: 0, NoSync: false, OpenFile: 0x0, Mlock: false, Logger: 0xc000068080}"}
I1229 06:56:16.247733 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.940423Z","logger":"bbolt","caller":"bbolt@v1.4.3/db.go:321","msg":"Opening bbolt db (/var/lib/minikube/etcd/member/snap/db) successfully"}
I1229 06:56:16.247745 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.940850Z","caller":"storage/backend.go:80","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"3.479356ms"}
I1229 06:56:16.247753 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.941120Z","caller":"etcdserver/bootstrap.go:220","msg":"restore consistentIndex","index":499}
I1229 06:56:16.247762 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945006Z","caller":"etcdserver/bootstrap.go:441","msg":"No snapshot found. Recovering WAL from scratch!"}
I1229 06:56:16.247774 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945707Z","caller":"etcdserver/bootstrap.go:232","msg":"recovered v3 backend","backend-size-bytes":1019904,"backend-size":"1.0 MB","backend-size-in-use-bytes":999424,"backend-size-in-use":"999 kB"}
I1229 06:56:16.247782 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945966Z","caller":"etcdserver/bootstrap.go:90","msg":"Bootstrapping WAL from snapshot"}
I1229 06:56:16.247807 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.951906Z","caller":"etcdserver/bootstrap.go:599","msg":"restarting local member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","commit-index":499}
I1229 06:56:16.247816 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952063Z","caller":"etcdserver/bootstrap.go:94","msg":"bootstrapping cluster"}
I1229 06:56:16.247825 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952160Z","caller":"etcdserver/bootstrap.go:101","msg":"bootstrapping storage"}
I1229 06:56:16.247840 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952338Z","caller":"membership/cluster.go:605","msg":"Detected member only in v3store but missing in v2store","member":"{ID:cbdf275f553df7c2 RaftAttributes:{PeerURLs:[https://192.168.39.121:2380] IsLearner:false} Attributes:{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}}"}
I1229 06:56:16.247851 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952385Z","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.6"}
I1229 06:56:16.247867 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952396Z","caller":"membership/cluster.go:297","msg":"recovered/added member from store","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","recovered-remote-peer-id":"cbdf275f553df7c2","recovered-remote-peer-urls":["https://192.168.39.121:2380"],"recovered-remote-peer-is-learner":false}
I1229 06:56:16.247878 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952406Z","caller":"membership/cluster.go:307","msg":"set cluster version from store","cluster-version":"3.6"}
I1229 06:56:16.247886 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952416Z","caller":"etcdserver/bootstrap.go:109","msg":"bootstrapping raft"}
I1229 06:56:16.247893 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952460Z","caller":"etcdserver/server.go:312","msg":"bootstrap successfully"}
I1229 06:56:16.247902 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952535Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=()"}
I1229 06:56:16.247914 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952604Z","logger":"raft","caller":"v3@v3.6.0/raft.go:897","msg":"cbdf275f553df7c2 became follower at term 3"}
I1229 06:56:16.247924 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952619Z","logger":"raft","caller":"v3@v3.6.0/raft.go:493","msg":"newRaft cbdf275f553df7c2 [peers: [], term: 3, commit: 499, applied: 0, lastindex: 499, lastterm: 3]"}
I1229 06:56:16.247935 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.955095Z","caller":"auth/store.go:1135","msg":"simple token is not cryptographically signed"}
I1229 06:56:16.247952 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.961356Z","caller":"mvcc/kvstore.go:408","msg":"kvstore restored","current-rev":465}
I1229 06:56:16.247965 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.967658Z","caller":"storage/quota.go:93","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
I1229 06:56:16.247975 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.968487Z","caller":"etcdserver/corrupt.go:91","msg":"starting initial corruption check","local-member-id":"cbdf275f553df7c2","timeout":"7s"}
I1229 06:56:16.247988 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969020Z","caller":"etcdserver/corrupt.go:172","msg":"initial corruption checking passed; no corruption","local-member-id":"cbdf275f553df7c2"}
I1229 06:56:16.248000 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969260Z","caller":"etcdserver/server.go:589","msg":"starting etcd server","local-member-id":"cbdf275f553df7c2","local-server-version":"3.6.6","cluster-id":"6f38b6947d3f1f22","cluster-version":"3.6"}
I1229 06:56:16.248016 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969708Z","caller":"etcdserver/server.go:483","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"cbdf275f553df7c2","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
I1229 06:56:16.248035 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970043Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:16.248063 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970828Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cbdf275f553df7c2","initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
I1229 06:56:16.248074 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971046Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
I1229 06:56:16.248083 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970057Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:16.248092 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971258Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.121:2380"}
I1229 06:56:16.248103 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970152Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
I1229 06:56:16.248113 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971336Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
I1229 06:56:16.248126 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971370Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
I1229 06:56:16.248136 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970393Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=(14690503799911348162)"}
I1229 06:56:16.248153 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.972410Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","added-peer-id":"cbdf275f553df7c2","added-peer-peer-urls":["https://192.168.39.121:2380"],"added-peer-is-learner":false}
I1229 06:56:16.248166 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.972698Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","from":"3.6","to":"3.6"}
I1229 06:56:16.248177 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353814Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cbdf275f553df7c2 is starting a new election at term 3"}
I1229 06:56:16.248186 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353865Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cbdf275f553df7c2 became pre-candidate at term 3"}
I1229 06:56:16.248198 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353992Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgPreVoteResp from cbdf275f553df7c2 at term 3"}
I1229 06:56:16.248208 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.354007Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
I1229 06:56:16.248219 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.354031Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cbdf275f553df7c2 became candidate at term 4"}
I1229 06:56:16.248228 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355853Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgVoteResp from cbdf275f553df7c2 at term 4"}
I1229 06:56:16.248240 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355920Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgVoteResp votes and 0 vote rejections"}
I1229 06:56:16.248248 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355940Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cbdf275f553df7c2 became leader at term 4"}
I1229 06:56:16.248260 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.356018Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cbdf275f553df7c2 elected leader cbdf275f553df7c2 at term 4"}
I1229 06:56:16.248275 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358237Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cbdf275f553df7c2","local-member-attributes":"{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}","cluster-id":"6f38b6947d3f1f22","publish-timeout":"7s"}
I1229 06:56:16.248287 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358323Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:16.248295 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358268Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:16.248304 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358820Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
I1229 06:56:16.248312 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358859Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
I1229 06:56:16.248320 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.360417Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:16.248331 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.360952Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:16.248341 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.363710Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
I1229 06:56:16.248352 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.363760Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.121:2379"}
I1229 06:56:16.254841 17440 logs.go:123] Gathering logs for etcd [d81259f64136] ...
I1229 06:56:16.254869 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 d81259f64136"
I1229 06:56:16.278647 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517725Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:16.278679 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517828Z","caller":"etcdmain/config.go:270","msg":"--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7."}
I1229 06:56:16.278723 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.517848Z","caller":"etcdmain/etcd.go:64","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.39.121:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--feature-gates=InitialCorruptCheck=true","--initial-advertise-peer-urls=https://192.168.39.121:2380","--initial-cluster=functional-695625=https://192.168.39.121:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.39.121:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.39.121:2380","--name=functional-695625","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--snapshot-count=10000","--trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--watch-progress-notify-interval=5s"]}
I1229 06:56:16.278736 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519323Z","caller":"etcdmain/etcd.go:107","msg":"server has already been initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
I1229 06:56:16.278750 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.519372Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:56:16.278759 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519700Z","caller":"embed/etcd.go:138","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.39.121:2380"]}
I1229 06:56:16.278780 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.522332Z","caller":"embed/etcd.go:544","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:16.278809 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.530852Z","caller":"embed/etcd.go:146","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"]}
I1229 06:56:16.278890 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.531312Z","caller":"embed/etcd.go:323","msg":"starting an etcd server","etcd-version":"3.6.6","git-sha":"d2809cf","go-version":"go1.24.10","go-os":"linux","go-arch":"amd64","max-cpu-set":2,"max-cpu-available":2,"member-initialized":true,"name":"functional-695625","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"experimental-local-address":"","cors":["*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"feature-gates":"InitialCorruptCheck=true","initial-corrupt-check":false,"corrupt-check-time-interval":"0s","compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","discovery-token":"","discovery-endpoints":"","discovery-dial-timeout":"2s","discovery-request-timeout":"5s","discovery-keepalive-time":"2s","discovery-keepalive-timeout":"6s","discovery-insecure-transport":true,"discovery-insecure-skip-tls-verify":false,"discovery-cert":"","discovery-key":"","discovery-cacert":"","discovery-user":"","downgrade-check-interval":"5s","max-learners":1,"v2-deprecation":"write-only"}
I1229 06:56:16.278913 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.533505Z","logger":"bbolt","caller":"backend/backend.go:203","msg":"Opening db file (/var/lib/minikube/etcd/member/snap/db) with mode -rw------- and with options: {Timeout: 0s, NoGrowSync: false, NoFreelistSync: true, PreLoadFreelist: false, FreelistType: hashmap, ReadOnly: false, MmapFlags: 8000, InitialMmapSize: 10737418240, PageSize: 0, NoSync: false, OpenFile: 0x0, Mlock: false, Logger: 0xc00006a930}"}
I1229 06:56:16.278923 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.562961Z","logger":"bbolt","caller":"bbolt@v1.4.3/db.go:321","msg":"Opening bbolt db (/var/lib/minikube/etcd/member/snap/db) successfully"}
I1229 06:56:16.278935 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.566967Z","caller":"storage/backend.go:80","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"33.344174ms"}
I1229 06:56:16.278946 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.569353Z","caller":"etcdserver/bootstrap.go:220","msg":"restore consistentIndex","index":497}
I1229 06:56:16.278957 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596637Z","caller":"etcdserver/bootstrap.go:441","msg":"No snapshot found. Recovering WAL from scratch!"}
I1229 06:56:16.278971 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596694Z","caller":"etcdserver/bootstrap.go:232","msg":"recovered v3 backend","backend-size-bytes":1019904,"backend-size":"1.0 MB","backend-size-in-use-bytes":999424,"backend-size-in-use":"999 kB"}
I1229 06:56:16.278982 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596795Z","caller":"etcdserver/bootstrap.go:90","msg":"Bootstrapping WAL from snapshot"}
I1229 06:56:16.278996 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.620855Z","caller":"etcdserver/bootstrap.go:599","msg":"restarting local member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","commit-index":497}
I1229 06:56:16.279006 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.621587Z","caller":"etcdserver/bootstrap.go:94","msg":"bootstrapping cluster"}
I1229 06:56:16.279014 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624518Z","caller":"etcdserver/bootstrap.go:101","msg":"bootstrapping storage"}
I1229 06:56:16.279031 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624664Z","caller":"membership/cluster.go:605","msg":"Detected member only in v3store but missing in v2store","member":"{ID:cbdf275f553df7c2 RaftAttributes:{PeerURLs:[https://192.168.39.121:2380] IsLearner:false} Attributes:{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}}"}
I1229 06:56:16.279040 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624700Z","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.6"}
I1229 06:56:16.279072 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624712Z","caller":"membership/cluster.go:297","msg":"recovered/added member from store","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","recovered-remote-peer-id":"cbdf275f553df7c2","recovered-remote-peer-urls":["https://192.168.39.121:2380"],"recovered-remote-peer-is-learner":false}
I1229 06:56:16.279083 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624720Z","caller":"membership/cluster.go:307","msg":"set cluster version from store","cluster-version":"3.6"}
I1229 06:56:16.279091 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624728Z","caller":"etcdserver/bootstrap.go:109","msg":"bootstrapping raft"}
I1229 06:56:16.279101 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624764Z","caller":"etcdserver/server.go:312","msg":"bootstrap successfully"}
I1229 06:56:16.279110 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624799Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=()"}
I1229 06:56:16.279121 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624856Z","logger":"raft","caller":"v3@v3.6.0/raft.go:897","msg":"cbdf275f553df7c2 became follower at term 2"}
I1229 06:56:16.279132 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624867Z","logger":"raft","caller":"v3@v3.6.0/raft.go:493","msg":"newRaft cbdf275f553df7c2 [peers: [], term: 2, commit: 497, applied: 0, lastindex: 497, lastterm: 2]"}
I1229 06:56:16.279142 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.634002Z","caller":"auth/store.go:1135","msg":"simple token is not cryptographically signed"}
I1229 06:56:16.279159 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.644772Z","caller":"mvcc/kvstore.go:408","msg":"kvstore restored","current-rev":465}
I1229 06:56:16.279173 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.681530Z","caller":"storage/quota.go:93","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
I1229 06:56:16.279183 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686046Z","caller":"etcdserver/corrupt.go:91","msg":"starting initial corruption check","local-member-id":"cbdf275f553df7c2","timeout":"7s"}
I1229 06:56:16.279195 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686350Z","caller":"etcdserver/corrupt.go:172","msg":"initial corruption checking passed; no corruption","local-member-id":"cbdf275f553df7c2"}
I1229 06:56:16.279208 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686391Z","caller":"etcdserver/server.go:589","msg":"starting etcd server","local-member-id":"cbdf275f553df7c2","local-server-version":"3.6.6","cluster-id":"6f38b6947d3f1f22","cluster-version":"3.6"}
I1229 06:56:16.279226 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687141Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:56:16.279249 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687399Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cbdf275f553df7c2","initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
I1229 06:56:16.279260 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687425Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
I1229 06:56:16.279275 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687475Z","caller":"etcdserver/server.go:483","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"cbdf275f553df7c2","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
I1229 06:56:16.279289 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687536Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
I1229 06:56:16.279300 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687564Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
I1229 06:56:16.279313 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687571Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
I1229 06:56:16.279322 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687702Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:16.279332 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687713Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.121:2380"}
I1229 06:56:16.279343 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692750Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=(14690503799911348162)"}
I1229 06:56:16.279359 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692847Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","added-peer-id":"cbdf275f553df7c2","added-peer-peer-urls":["https://192.168.39.121:2380"],"added-peer-is-learner":false}
I1229 06:56:16.279374 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.694703Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","from":"3.6","to":"3.6"}
I1229 06:56:16.279386 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830725Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cbdf275f553df7c2 is starting a new election at term 2"}
I1229 06:56:16.279396 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830765Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cbdf275f553df7c2 became pre-candidate at term 2"}
I1229 06:56:16.279406 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830844Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgPreVoteResp from cbdf275f553df7c2 at term 2"}
I1229 06:56:16.279418 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830858Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
I1229 06:56:16.279429 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830936Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cbdf275f553df7c2 became candidate at term 3"}
I1229 06:56:16.279439 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832101Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgVoteResp from cbdf275f553df7c2 at term 3"}
I1229 06:56:16.279451 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832130Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgVoteResp votes and 0 vote rejections"}
I1229 06:56:16.279460 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832148Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cbdf275f553df7c2 became leader at term 3"}
I1229 06:56:16.279469 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832166Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cbdf275f553df7c2 elected leader cbdf275f553df7c2 at term 3"}
I1229 06:56:16.279479 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835446Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:16.279494 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835384Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cbdf275f553df7c2","local-member-attributes":"{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}","cluster-id":"6f38b6947d3f1f22","publish-timeout":"7s"}
I1229 06:56:16.279503 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839145Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:56:16.279513 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839733Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:16.279523 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851017Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
I1229 06:56:16.279531 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851067Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
I1229 06:56:16.279541 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851748Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:56:16.279551 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.856729Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
I1229 06:56:16.279562 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.869216Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.121:2379"}
I1229 06:56:16.279570 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706108Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
I1229 06:56:16.279585 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706269Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:56:16.279603 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:51.706381Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:56:16.279622 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.707655Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:56:16.279661 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.709799Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:16.279676 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.709913Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"cbdf275f553df7c2","current-leader-member-id":"cbdf275f553df7c2"}
I1229 06:56:16.279688 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710255Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
I1229 06:56:16.279698 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710690Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:56:16.279711 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710782Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:56:16.279730 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.710832Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:16.279741 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710742Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
I1229 06:56:16.279751 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711002Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:56:16.279764 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711035Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:56:16.279785 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.711045Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:16.279805 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717452Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:16.279825 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.717551Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:56:16.279836 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717601Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:56:16.279852 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717654Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:56:16.287590 17440 logs.go:123] Gathering logs for coredns [6f69ba6a1553] ...
I1229 06:56:16.287613 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 6f69ba6a1553"
I1229 06:56:16.310292 17440 logs.go:123] Gathering logs for storage-provisioner [bd96b57aa9fc] ...
I1229 06:56:16.310320 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 bd96b57aa9fc"
I1229 06:56:16.331009 17440 command_runner.go:130] ! I1229 06:52:48.539098 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1229 06:56:16.331044 17440 command_runner.go:130] ! I1229 06:52:48.550309 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1229 06:56:16.331054 17440 command_runner.go:130] ! I1229 06:52:48.550373 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1229 06:56:16.331067 17440 command_runner.go:130] ! W1229 06:52:48.552935 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331076 17440 command_runner.go:130] ! W1229 06:52:48.563735 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331083 17440 command_runner.go:130] ! I1229 06:52:48.564362 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1229 06:56:16.331093 17440 command_runner.go:130] ! I1229 06:52:48.565422 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:56:16.331114 17440 command_runner.go:130] ! I1229 06:52:48.565143 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"cfb65e16-c2f7-4c19-a059-8ef64f8f3f2e", APIVersion:"v1", ResourceVersion:"388", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868 became leader
I1229 06:56:16.331232 17440 command_runner.go:130] ! W1229 06:52:48.576668 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331256 17440 command_runner.go:130] ! W1229 06:52:48.582743 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331268 17440 command_runner.go:130] ! I1229 06:52:48.665711 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:56:16.331275 17440 command_runner.go:130] ! W1229 06:52:50.588204 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331289 17440 command_runner.go:130] ! W1229 06:52:50.593815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331298 17440 command_runner.go:130] ! W1229 06:52:52.597431 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331316 17440 command_runner.go:130] ! W1229 06:52:52.602815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331329 17440 command_runner.go:130] ! W1229 06:52:54.606663 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331341 17440 command_runner.go:130] ! W1229 06:52:54.612650 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331355 17440 command_runner.go:130] ! W1229 06:52:56.616395 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331363 17440 command_runner.go:130] ! W1229 06:52:56.622404 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331374 17440 command_runner.go:130] ! W1229 06:52:58.626804 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331386 17440 command_runner.go:130] ! W1229 06:52:58.637257 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331400 17440 command_runner.go:130] ! W1229 06:53:00.640728 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331413 17440 command_runner.go:130] ! W1229 06:53:00.646446 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331425 17440 command_runner.go:130] ! W1229 06:53:02.650659 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331441 17440 command_runner.go:130] ! W1229 06:53:02.660374 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331454 17440 command_runner.go:130] ! W1229 06:53:04.664091 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331468 17440 command_runner.go:130] ! W1229 06:53:04.669806 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331478 17440 command_runner.go:130] ! W1229 06:53:06.674203 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331488 17440 command_runner.go:130] ! W1229 06:53:06.680002 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331496 17440 command_runner.go:130] ! W1229 06:53:08.683483 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331506 17440 command_runner.go:130] ! W1229 06:53:08.688934 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331519 17440 command_runner.go:130] ! W1229 06:53:10.693644 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331529 17440 command_runner.go:130] ! W1229 06:53:10.706122 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331537 17440 command_runner.go:130] ! W1229 06:53:12.709949 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331547 17440 command_runner.go:130] ! W1229 06:53:12.715753 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331555 17440 command_runner.go:130] ! W1229 06:53:14.719191 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331564 17440 command_runner.go:130] ! W1229 06:53:14.728100 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331572 17440 command_runner.go:130] ! W1229 06:53:16.731658 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331580 17440 command_runner.go:130] ! W1229 06:53:16.737463 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331592 17440 command_runner.go:130] ! W1229 06:53:18.741304 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331604 17440 command_runner.go:130] ! W1229 06:53:18.746708 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331618 17440 command_runner.go:130] ! W1229 06:53:20.749662 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331629 17440 command_runner.go:130] ! W1229 06:53:20.755989 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331645 17440 command_runner.go:130] ! W1229 06:53:22.760815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331659 17440 command_runner.go:130] ! W1229 06:53:22.772421 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331673 17440 command_runner.go:130] ! W1229 06:53:24.776403 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331689 17440 command_runner.go:130] ! W1229 06:53:24.783232 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331703 17440 command_runner.go:130] ! W1229 06:53:26.786665 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331716 17440 command_runner.go:130] ! W1229 06:53:26.792239 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331728 17440 command_runner.go:130] ! W1229 06:53:28.796420 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331740 17440 command_runner.go:130] ! W1229 06:53:28.805511 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331756 17440 command_runner.go:130] ! W1229 06:53:30.808544 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331771 17440 command_runner.go:130] ! W1229 06:53:30.816066 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331784 17440 command_runner.go:130] ! W1229 06:53:32.820090 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331816 17440 command_runner.go:130] ! W1229 06:53:32.826208 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331830 17440 command_runner.go:130] ! W1229 06:53:34.829865 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331847 17440 command_runner.go:130] ! W1229 06:53:34.835774 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331863 17440 command_runner.go:130] ! W1229 06:53:36.839291 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331879 17440 command_runner.go:130] ! W1229 06:53:36.853251 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331894 17440 command_runner.go:130] ! W1229 06:53:38.856432 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.331908 17440 command_runner.go:130] ! W1229 06:53:38.862360 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:56:16.336243 17440 logs.go:123] Gathering logs for Docker ...
I1229 06:56:16.336267 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I1229 06:56:16.358115 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:56:16.358145 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:56:16.358155 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:56:16.358165 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:56:16.358177 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=fatal msg="failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.358186 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Main process exited, code=exited, status=1/FAILURE
I1229 06:56:16.358194 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Failed with result 'exit-code'.
I1229 06:56:16.358203 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Failed to start CRI Interface for Docker Application Container Engine.
I1229 06:56:16.358209 17440 command_runner.go:130] > Dec 29 06:52:22 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:56:16.358220 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.020462163Z" level=info msg="Starting up"
I1229 06:56:16.358229 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.027928346Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:56:16.358241 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028129610Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:56:16.358254 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028144703Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:56:16.358266 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.043277940Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:56:16.358278 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.068992169Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:56:16.358285 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.093451498Z" level=info msg="Loading containers: start."
I1229 06:56:16.358307 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.245820420Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:56:16.358315 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.354124488Z" level=info msg="Loading containers: done."
I1229 06:56:16.358328 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.369556904Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:56:16.358336 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.370022229Z" level=info msg="Initializing buildkit"
I1229 06:56:16.358343 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: cri-docker.service: Scheduled restart job, restart counter is at 1.
I1229 06:56:16.358350 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:56:16.358360 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.429481151Z" level=info msg="Completed buildkit initialization"
I1229 06:56:16.358369 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437135480Z" level=info msg="Daemon has completed initialization"
I1229 06:56:16.358377 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437228150Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:56:16.358385 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437499736Z" level=info msg="API listen on /run/docker.sock"
I1229 06:56:16.358399 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437545942Z" level=info msg="API listen on [::]:2376"
I1229 06:56:16.358408 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:56:16.358415 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:56:16.358425 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:56:16.358436 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:56:16.358445 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:56:16.358455 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Hairpin mode is set to none"
I1229 06:56:16.358463 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Loaded network plugin cni"
I1229 06:56:16.358474 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:56:16.358481 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:56:16.358491 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:56:16.358500 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:56:16.358508 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:56:16.358515 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:56:16.358530 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250034276Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:56:16.358543 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250065025Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:56:16.358555 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250432086Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:56:16.358576 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250448972Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:56:16.358584 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:56:16.358593 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.004793725Z" level=info msg="Processing signal 'terminated'"
I1229 06:56:16.358604 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006609373Z" level=warning msg="Error while testing if containerd API is ready" error="Canceled: grpc: the client connection is closing"
I1229 06:56:16.358614 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006865498Z" level=info msg="Daemon shutdown complete"
I1229 06:56:16.358621 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:56:16.358628 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:56:16.358635 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:56:16.358644 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.072059214Z" level=info msg="Starting up"
I1229 06:56:16.358653 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079212056Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:56:16.358666 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079317481Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:56:16.358685 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079333267Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:56:16.358697 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.100712562Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:56:16.358707 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.111060819Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:56:16.358716 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.125644752Z" level=info msg="Loading containers: start."
I1229 06:56:16.358735 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.269806698Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:56:16.358745 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.403684326Z" level=info msg="Loading containers: done."
I1229 06:56:16.358755 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419740189Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:56:16.358763 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419840379Z" level=info msg="Initializing buildkit"
I1229 06:56:16.358805 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.440865810Z" level=info msg="Completed buildkit initialization"
I1229 06:56:16.358818 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.450796825Z" level=info msg="Daemon has completed initialization"
I1229 06:56:16.358827 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451233366Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:56:16.358837 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451367379Z" level=info msg="API listen on /run/docker.sock"
I1229 06:56:16.358847 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451393479Z" level=info msg="API listen on [::]:2376"
I1229 06:56:16.358854 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:56:16.358861 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:56:16.358867 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:56:16.358874 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:56:16.358881 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:56:16.358893 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:56:16.358904 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:56:16.358913 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:56:16.358921 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:56:16.358930 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Loaded network plugin cni"
I1229 06:56:16.358942 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:56:16.358950 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:56:16.358959 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:56:16.358970 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:56:16.358979 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:56:16.358986 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:56:16.358992 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:56:16.359001 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.014018901Z" level=info msg="Processing signal 'terminated'"
I1229 06:56:16.359011 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.015980570Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
I1229 06:56:16.359021 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.016658114Z" level=info msg="Daemon shutdown complete"
I1229 06:56:16.359029 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:56:16.359036 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:56:16.359042 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:56:16.359052 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.080172805Z" level=info msg="Starting up"
I1229 06:56:16.359060 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087153730Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:56:16.359071 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087606870Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:56:16.359084 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087791007Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:56:16.359094 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.102104328Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:56:16.359106 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.438808405Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:56:16.359113 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.712758412Z" level=info msg="Loading containers: start."
I1229 06:56:16.359135 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.850108278Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:56:16.359144 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.981771558Z" level=info msg="Loading containers: done."
I1229 06:56:16.359154 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997281457Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:56:16.359164 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997336373Z" level=info msg="Initializing buildkit"
I1229 06:56:16.359172 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.018270012Z" level=info msg="Completed buildkit initialization"
I1229 06:56:16.359182 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.027948102Z" level=info msg="Daemon has completed initialization"
I1229 06:56:16.359190 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028167710Z" level=info msg="API listen on /run/docker.sock"
I1229 06:56:16.359198 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028236879Z" level=info msg="API listen on [::]:2376"
I1229 06:56:16.359206 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028260561Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:56:16.359213 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:56:16.359244 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359260 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359275 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359288 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359300 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:44Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
I1229 06:56:16.359313 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:46Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359328 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359343 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359357 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.359372 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.687270343Z" level=info msg="ignoring event" container=67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359386 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.834054505Z" level=info msg="ignoring event" container=82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359399 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154228197Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:56:16.359410 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154272599Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:56:16.359422 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154382560Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:56:16.359435 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154394909Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:56:16.359442 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:56:16.359452 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 dockerd[1647]: time="2025-12-29T06:53:25.157393741Z" level=info msg="Processing signal 'terminated'"
I1229 06:56:16.359460 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.159560262Z" level=error msg="Force shutdown daemon"
I1229 06:56:16.359468 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.160035445Z" level=info msg="Daemon shutdown complete"
I1229 06:56:16.359474 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:56:16.359481 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:56:16.359487 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Consumed 2.138s CPU time, 29.7M memory peak.
I1229 06:56:16.359494 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:56:16.359502 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.286623538Z" level=info msg="Starting up"
I1229 06:56:16.359511 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295291170Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:56:16.359521 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295480841Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:56:16.359532 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295496671Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:56:16.359544 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.316635284Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:56:16.359553 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.328807793Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:56:16.359561 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.355375449Z" level=info msg="Loading containers: start."
I1229 06:56:16.359574 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.681285713Z" level=info msg="ignoring event" container=5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359590 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.755492465Z" level=info msg="ignoring event" container=bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359602 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.762530714Z" level=info msg="ignoring event" container=64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359617 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.775670003Z" level=info msg="ignoring event" container=0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359630 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.794654459Z" level=info msg="ignoring event" container=8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359646 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.801655844Z" level=info msg="ignoring event" container=548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359660 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828715029Z" level=info msg="ignoring event" container=ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359676 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828769092Z" level=info msg="ignoring event" container=a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359689 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.845767487Z" level=info msg="ignoring event" container=abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359706 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.864343925Z" level=info msg="ignoring event" container=fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359719 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.865774071Z" level=info msg="ignoring event" container=14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359731 17440 command_runner.go:130] > Dec 29 06:53:45 functional-695625 dockerd[4014]: time="2025-12-29T06:53:45.656598076Z" level=info msg="ignoring event" container=bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359744 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.636734672Z" level=info msg="ignoring event" container=fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.359763 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.811417108Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:56:16.359779 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.854503584Z" level=info msg="Removing stale sandbox" cid=a123d63a8edb isRestore=false sid=bee98e10184c
I1229 06:56:16.359800 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.857444846Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 155f23c2cd353f99747cbbed5071c374427d34acfe358ab2da9489f0ecc6dd58 20989221f5da3e18159e9875a44d6ffa354887adacc49a282cdee70b58f0dd06], retrying...."
I1229 06:56:16.359813 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.873316567Z" level=info msg="Removing stale sandbox" cid=0af491ef7c2f isRestore=false sid=043bbf7592a3
I1229 06:56:16.359827 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.875334227Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 abd499ef79857402bb9465b07e26fb0f75693045ea6a45283c4a1a4b13da7c92], retrying...."
I1229 06:56:16.359837 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.887452986Z" level=info msg="Removing stale sandbox" cid=ad82b94f7629 isRestore=false sid=4ae81a2c92d8
I1229 06:56:16.359852 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.890633879Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 ccabc3ec6c0d337909f3a6bfccd1999d5ddec500f785c46c7c1173bb9f142a4d], retrying...."
I1229 06:56:16.359864 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.896180450Z" level=info msg="Removing stale sandbox" cid=5024b03252e3 isRestore=false sid=4f7be10df8fc
I1229 06:56:16.359878 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.898438145Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 0e272d35a10e432b889f2a3f6f048225031acf42b0444ba6b0cc9339f3cb374f], retrying...."
I1229 06:56:16.359890 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.903187461Z" level=info msg="Removing stale sandbox" cid=64853b50a6c5 isRestore=false sid=826a3dc204ef
I1229 06:56:16.359904 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.905271147Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 67cd3d4378e987242bd34247eace592097542682b6c3f23a5a478422e9bfbb3b], retrying...."
I1229 06:56:16.359916 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.910152629Z" level=info msg="Removing stale sandbox" cid=548561c7ada8 isRestore=false sid=94281ce70a77
I1229 06:56:16.359932 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.911967707Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 b513626d489ab85e12802c06e57f2ac0b0298434467c73d2846152ca9481eeae], retrying...."
I1229 06:56:16.359945 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.917235829Z" level=info msg="Removing stale sandbox" cid=fe7b5da2f7fb isRestore=false sid=b5e6c523a381
I1229 06:56:16.359960 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.919265802Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 bef3c0f56e910ab0a1a698f2eb08c97229abee2b90bf53ab9119cbdba3cb6eaa], retrying...."
I1229 06:56:16.359975 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022090385Z" level=warning msg="error locating sandbox id 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3: sandbox 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3 not found"
I1229 06:56:16.359988 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022147638Z" level=warning msg="error locating sandbox id 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48: sandbox 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48 not found"
I1229 06:56:16.360003 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022162233Z" level=warning msg="error locating sandbox id 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6: sandbox 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6 not found"
I1229 06:56:16.360019 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022177741Z" level=warning msg="error locating sandbox id 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e: sandbox 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e not found"
I1229 06:56:16.360037 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022193375Z" level=warning msg="error locating sandbox id bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc: sandbox bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc not found"
I1229 06:56:16.360051 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022209936Z" level=warning msg="error locating sandbox id 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2: sandbox 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2 not found"
I1229 06:56:16.360064 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022222477Z" level=warning msg="error locating sandbox id b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0: sandbox b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0 not found"
I1229 06:56:16.360074 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022440032Z" level=info msg="Loading containers: done."
I1229 06:56:16.360085 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037242165Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:56:16.360093 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037335060Z" level=info msg="Initializing buildkit"
I1229 06:56:16.360102 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.057350643Z" level=info msg="Completed buildkit initialization"
I1229 06:56:16.360113 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.066932687Z" level=info msg="Daemon has completed initialization"
I1229 06:56:16.360121 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067089967Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:56:16.360130 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067190842Z" level=info msg="API listen on /run/docker.sock"
I1229 06:56:16.360163 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067284257Z" level=info msg="API listen on [::]:2376"
I1229 06:56:16.360172 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:56:16.360189 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:53:48Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780\""
I1229 06:56:16.360197 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:56:16.360204 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:56:16.360210 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:56:16.360218 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Consumed 1.284s CPU time, 18.5M memory peak.
I1229 06:56:16.360225 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:56:16.360236 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:56:16.360245 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:56:16.360255 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:56:16.360263 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:56:16.360271 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Loaded network plugin cni"
I1229 06:56:16.360280 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:56:16.360288 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:56:16.360297 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:56:16.360308 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:56:16.360317 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:56:16.360326 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:56:16.360338 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360353 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360365 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360380 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360392 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 dockerd[4014]: time="2025-12-29T06:53:50.654005689Z" level=info msg="ignoring event" container=fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360410 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360426 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": unexpected command output nsenter: cannot open /proc/5603/ns/net: No such file or directory\n with error: exit status 1"
I1229 06:56:16.360441 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.781948864Z" level=info msg="ignoring event" container=17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360454 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.786486841Z" level=info msg="ignoring event" container=1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360467 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.807329963Z" level=info msg="ignoring event" container=b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360482 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.833907949Z" level=info msg="ignoring event" container=6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360494 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.842344727Z" level=info msg="ignoring event" container=a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360510 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.846952655Z" level=info msg="ignoring event" container=4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360525 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.855675748Z" level=info msg="ignoring event" container=98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360538 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 dockerd[4014]: time="2025-12-29T06:53:52.089998903Z" level=info msg="ignoring event" container=a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360553 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a96e34d38f8c1eccbbdf73d99dbbbe353acea505d84b69f0fdd4e54cb811123/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360566 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d3819cc8ab802e5145e47325398f1da69b88a241482842040339b6b0d609a176/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360582 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fefef7c5591ea14974a99c19d99f86c4404e25de1b446a0cd0f0bcfffa63a991/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360599 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784\""
I1229 06:56:16.360617 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a7b1e961ded554edec9d882d7f1f6093e8446ab1020c81b638de16b76de139b0/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360628 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.150956960Z" level=error msg="collecting stats for container /k8s_etcd_etcd-functional-695625_kube-system_8da5c6c8980da2ca920a502b6f312384_1: invalid id: id is empty"
I1229 06:56:16.360643 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: 2025/12/29 06:53:58 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request.(*RespWriterWrapper).writeHeader (resp_writer_wrapper.go:83)
I1229 06:56:16.360656 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.741840545Z" level=info msg="ignoring event" container=d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360671 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:59Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/173054afc2f39262ebb1466d26d5d6144bb8704054c087da601130a01d9caaf1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:56:16.360682 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: W1229 06:53:59.025412 4884 logging.go:59] [core] [Server #1] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
I1229 06:56:16.360699 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:56:16.360711 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:56:16.360726 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:56:16.360736 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:56:16.360749 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 dockerd[4014]: time="2025-12-29T06:56:00.626282205Z" level=info msg="ignoring event" container=78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.360762 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 dockerd[4014]: time="2025-12-29T06:56:11.553142622Z" level=info msg="ignoring event" container=18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:56:16.377860 17440 logs.go:123] Gathering logs for kubelet ...
I1229 06:56:16.377891 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1229 06:56:16.394828 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.406131 2634 kubelet_node_status.go:427] "Fast updating node status as it just became ready"
I1229 06:56:16.394877 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519501 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b64sn\" (UniqueName: \"kubernetes.io/projected/00a95e37-1394-45a7-a376-b195e31e3e9c-kube-api-access-b64sn\") pod \"coredns-7d764666f9-wfq7m\" (UID: \"00a95e37-1394-45a7-a376-b195e31e3e9c\") " pod="kube-system/coredns-7d764666f9-wfq7m"
I1229 06:56:16.394896 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519550 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/00a95e37-1394-45a7-a376-b195e31e3e9c-config-volume\") pod \"coredns-7d764666f9-wfq7m\" (UID: \"00a95e37-1394-45a7-a376-b195e31e3e9c\") " pod="kube-system/coredns-7d764666f9-wfq7m"
I1229 06:56:16.394920 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519571 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") pod \"coredns-7d764666f9-9mrnn\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") " pod="kube-system/coredns-7d764666f9-9mrnn"
I1229 06:56:16.394952 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 kubelet[2634]: I1229 06:52:46.519587 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") pod \"coredns-7d764666f9-9mrnn\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") " pod="kube-system/coredns-7d764666f9-9mrnn"
I1229 06:56:16.394976 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.411642 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605"
I1229 06:56:16.394988 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.545186 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:56:16.395012 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.731196 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/b5f201ca-6d54-4e15-9584-396fb1486f3c-tmp\") pod \"storage-provisioner\" (UID: \"b5f201ca-6d54-4e15-9584-396fb1486f3c\") " pod="kube-system/storage-provisioner"
I1229 06:56:16.395045 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 kubelet[2634]: I1229 06:52:47.731252 2634 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghc5d\" (UniqueName: \"kubernetes.io/projected/b5f201ca-6d54-4e15-9584-396fb1486f3c-kube-api-access-ghc5d\") pod \"storage-provisioner\" (UID: \"b5f201ca-6d54-4e15-9584-396fb1486f3c\") " pod="kube-system/storage-provisioner"
I1229 06:56:16.395075 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: E1229 06:52:48.628275 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.395109 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.634714 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/coredns-7d764666f9-9mrnn" podStartSLOduration=2.634698273 podStartE2EDuration="2.634698273s" podCreationTimestamp="2025-12-29 06:52:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.631484207 +0000 UTC m=+7.674081027" watchObservedRunningTime="2025-12-29 06:52:48.634698273 +0000 UTC m=+7.677295093"
I1229 06:56:16.395143 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: E1229 06:52:48.649761 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.395179 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.694857 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/coredns-7d764666f9-wfq7m" podStartSLOduration=2.694842541 podStartE2EDuration="2.694842541s" podCreationTimestamp="2025-12-29 06:52:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.672691157 +0000 UTC m=+7.715287974" watchObservedRunningTime="2025-12-29 06:52:48.694842541 +0000 UTC m=+7.737439360"
I1229 06:56:16.395221 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 kubelet[2634]: I1229 06:52:48.728097 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.728082592 podStartE2EDuration="1.728082592s" podCreationTimestamp="2025-12-29 06:52:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.696376688 +0000 UTC m=+7.738973499" watchObservedRunningTime="2025-12-29 06:52:48.728082592 +0000 UTC m=+7.770679413"
I1229 06:56:16.395242 17440 command_runner.go:130] > Dec 29 06:52:49 functional-695625 kubelet[2634]: E1229 06:52:49.674249 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.395263 17440 command_runner.go:130] > Dec 29 06:52:50 functional-695625 kubelet[2634]: E1229 06:52:50.680852 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.395283 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.223368 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.395324 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: I1229 06:52:52.243928 2634 pod_startup_latency_tracker.go:108] "Observed pod startup duration" pod="kube-system/kube-proxy-g7lp9" podStartSLOduration=7.243911092 podStartE2EDuration="7.243911092s" podCreationTimestamp="2025-12-29 06:52:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-29 06:52:48.744380777 +0000 UTC m=+7.786977597" watchObservedRunningTime="2025-12-29 06:52:52.243911092 +0000 UTC m=+11.286507895"
I1229 06:56:16.395347 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.396096 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.395368 17440 command_runner.go:130] > Dec 29 06:52:52 functional-695625 kubelet[2634]: E1229 06:52:52.693687 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.395390 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: E1229 06:52:53.390926 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.395423 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979173 2634 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") pod \"c4313c5f-3b86-48de-8f3c-02d7e007542a\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") "
I1229 06:56:16.395451 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979225 2634 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") pod \"c4313c5f-3b86-48de-8f3c-02d7e007542a\" (UID: \"c4313c5f-3b86-48de-8f3c-02d7e007542a\") "
I1229 06:56:16.395496 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.979732 2634 operation_generator.go:779] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume" pod "c4313c5f-3b86-48de-8f3c-02d7e007542a" (UID: "c4313c5f-3b86-48de-8f3c-02d7e007542a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
I1229 06:56:16.395529 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 kubelet[2634]: I1229 06:52:53.981248 2634 operation_generator.go:779] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj" pod "c4313c5f-3b86-48de-8f3c-02d7e007542a" (UID: "c4313c5f-3b86-48de-8f3c-02d7e007542a"). InnerVolumeSpecName "kube-api-access-lc5xj". PluginName "kubernetes.io/projected", VolumeGIDValue ""
I1229 06:56:16.395551 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.079447 2634 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4313c5f-3b86-48de-8f3c-02d7e007542a-config-volume\") on node \"functional-695625\" DevicePath \"\""
I1229 06:56:16.395578 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.079521 2634 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-lc5xj\" (UniqueName: \"kubernetes.io/projected/c4313c5f-3b86-48de-8f3c-02d7e007542a-kube-api-access-lc5xj\") on node \"functional-695625\" DevicePath \"\""
I1229 06:56:16.395597 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.715729 2634 scope.go:122] "RemoveContainer" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:56:16.395618 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.756456 2634 scope.go:122] "RemoveContainer" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:56:16.395641 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: E1229 06:52:54.758451 2634 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f" containerID="67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:56:16.395678 17440 command_runner.go:130] > Dec 29 06:52:54 functional-695625 kubelet[2634]: I1229 06:52:54.758508 2634 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"} err="failed to get container status \"67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f\": rpc error: code = Unknown desc = Error response from daemon: No such container: 67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f"
I1229 06:56:16.395702 17440 command_runner.go:130] > Dec 29 06:52:55 functional-695625 kubelet[2634]: I1229 06:52:55.144582 2634 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="c4313c5f-3b86-48de-8f3c-02d7e007542a" path="/var/lib/kubelet/pods/c4313c5f-3b86-48de-8f3c-02d7e007542a/volumes"
I1229 06:56:16.395719 17440 command_runner.go:130] > Dec 29 06:52:58 functional-695625 kubelet[2634]: E1229 06:52:58.655985 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.395743 17440 command_runner.go:130] > Dec 29 06:53:20 functional-695625 kubelet[2634]: E1229 06:53:20.683378 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.395770 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913108 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.395806 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913180 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.395831 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 kubelet[2634]: E1229 06:53:25.913193 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.395859 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915141 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.395885 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915181 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.395903 17440 command_runner.go:130] > Dec 29 06:53:26 functional-695625 kubelet[2634]: E1229 06:53:26.915192 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.395929 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139490 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.395956 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139600 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.395981 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139623 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396000 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.139634 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396027 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917175 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396052 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917271 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396087 17440 command_runner.go:130] > Dec 29 06:53:27 functional-695625 kubelet[2634]: E1229 06:53:27.917284 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396114 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918722 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396138 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918780 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396161 17440 command_runner.go:130] > Dec 29 06:53:28 functional-695625 kubelet[2634]: E1229 06:53:28.918792 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396186 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139097 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.396267 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139170 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396295 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139187 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396315 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.139214 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396339 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921730 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396362 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921808 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396387 17440 command_runner.go:130] > Dec 29 06:53:29 functional-695625 kubelet[2634]: E1229 06:53:29.921823 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396413 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.923664 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396433 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.924161 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396458 17440 command_runner.go:130] > Dec 29 06:53:30 functional-695625 kubelet[2634]: E1229 06:53:30.924185 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396484 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139396 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.396508 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139458 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396526 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139472 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396550 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.139485 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396585 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239338 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396609 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239383 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396634 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244411 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:56:16.396662 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244504 2634 container_log_manager.go:154] "Failed to rotate container logs" err="failed to list containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396687 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458139 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:56:16.396711 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458218 2634 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to list pod stats: failed to get pod or container map: failed to list all containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396739 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926377 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396763 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926435 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396786 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926447 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396821 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.994121 2634 log.go:32] "Version from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396848 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927827 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.396872 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927867 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396891 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927930 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396919 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140553 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.396943 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140635 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396966 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140653 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.396989 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140664 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397016 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930020 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397040 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930083 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397064 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930129 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397089 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932311 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397114 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932363 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397139 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932375 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397161 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140618 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.397187 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140679 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397211 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140697 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397233 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140709 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397256 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933321 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397281 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933382 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397307 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933393 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397330 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241324 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397358 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241391 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397387 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935649 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397424 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935930 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397450 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935948 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397477 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140389 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.397500 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140507 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397521 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140525 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397544 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140536 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397571 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937258 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397594 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937350 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397618 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937364 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397644 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939069 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397668 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939129 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397686 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939141 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397742 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139354 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:56:16.397766 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139413 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397786 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139428 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397818 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139440 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397849 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941237 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:56:16.397872 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941285 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397897 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941296 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:56:16.397918 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.109014 2634 kubelet.go:2691] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="6.97s"
I1229 06:56:16.397940 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.125762 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:56:16.397961 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.129855 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.397984 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.131487 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.398006 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.140438 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:56:16.398027 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.141703 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.398047 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.164789 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:56:16.398071 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.190793 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:56:16.398100 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202029 2634 kuberuntime_manager.go:1961] "PodSandboxStatus of sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd" pod="kube-system/etcd-functional-695625"
I1229 06:56:16.398122 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202077 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:56:16.398141 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202095 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:56:16.398162 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202348 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:56:16.398186 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202382 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:56:16.398209 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202394 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:56:16.398244 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202436 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-proxy-g7lp9" podUID="9c2c2ac1-7fa0-427d-b78e-ee14e169895a"
I1229 06:56:16.398272 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202695 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:56:16.398294 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202717 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"}
I1229 06:56:16.398317 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202737 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:56:16.398350 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202753 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/storage-provisioner" podUID="b5f201ca-6d54-4e15-9584-396fb1486f3c"
I1229 06:56:16.398371 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202781 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:56:16.398394 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202797 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:56:16.398413 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202829 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:56:16.398456 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203153 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.398481 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203201 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:56:16.398498 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203215 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"}
I1229 06:56:16.398525 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203229 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:56:16.398557 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203240 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:56:16.398599 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.205108 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer"
I1229 06:56:16.398632 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205291 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"ebc0dd45a3bf1e20d1e524935fd6129c\"}"
I1229 06:56:16.398661 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205358 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="ebc0dd45a3bf1e20d1e524935fd6129c"
I1229 06:56:16.398683 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205374 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:56:16.398714 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205391 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:56:16.398746 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205442 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"5079d003096e0cf8214852718da6832c\"}"
I1229 06:56:16.398769 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205472 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:56:16.398813 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205487 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.398843 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205502 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.398873 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.306369 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:56:16.398910 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.465709 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:56:16.398942 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.727775 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:56:16.398963 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.224724 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:56:16.398985 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.225054 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399007 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.239349 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:56:16.399028 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.239613 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.399052 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.260924 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:56:16.399082 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.262706 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.399104 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.271403 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:56:16.399121 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.272071 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.399145 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.486082 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399170 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.527267 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.399191 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.585714 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.399209 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.682419 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.399231 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 kubelet[2634]: E1229 06:53:51.994421 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.399253 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.009282 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399275 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.028514 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.399295 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: I1229 06:53:52.059063 2634 scope.go:122] "RemoveContainer" containerID="4b032678478a0db80f17dd1d989d5d3ad03f5c19d261d887ee8bbc80c0ef716c"
I1229 06:56:16.399309 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.061268 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.399328 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.136206 2634 controller.go:251] "Failed to update lease" err="Put \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:56:16.399366 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.348866 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:56:16.399402 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.420977 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.399416 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.083455 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:56:16.399427 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099631 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:56:16.399440 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099665 2634 scope.go:122] "RemoveContainer" containerID="14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697"
I1229 06:56:16.399454 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.099823 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399467 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.114949 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:56:16.399491 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115125 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.399517 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.115147 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:56:16.399553 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115570 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:56:16.399565 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128002 2634 scope.go:122] "RemoveContainer" containerID="abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32"
I1229 06:56:16.399576 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128620 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:56:16.399588 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.128846 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.399598 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128862 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:56:16.399618 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.129184 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.399629 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.146245 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.399640 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.149274 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:56:16.399653 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.158968 2634 scope.go:122] "RemoveContainer" containerID="bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159"
I1229 06:56:16.399671 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.483523 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:56:16.399684 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.165031 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.399694 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.165425 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:56:16.399724 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.166088 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:56:16.399741 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.177787 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.399752 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.177811 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:56:16.399771 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.178010 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:56:16.399782 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190233 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.399801 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.190259 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:56:16.399822 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190388 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.399834 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190596 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399845 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.197650 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.399857 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198541 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:56:16.399866 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: I1229 06:53:55.198579 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:56:16.399885 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198854 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:56:16.399928 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.114313 2634 kubelet_node_status.go:474] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"containerRuntimeVersion\\\":\\\"docker://28.5.2\\\"}}}\" for node \"functional-695625\": Patch \"https://192.168.39.121:8441/api/v1/nodes/functional-695625/status?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:56:16.400087 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.650698 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.400109 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.650771 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:56:16.400130 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.651066 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.400140 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.808551 2634 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:56:16.400147 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
I1229 06:56:16.400153 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
I1229 06:56:16.400162 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
I1229 06:56:16.400169 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Consumed 2.468s CPU time, 33.6M memory peak.
I1229 06:56:16.400175 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 systemd[1]: Started kubelet: The Kubernetes Node Agent.
I1229 06:56:16.400184 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045324 6517 server.go:525] "Kubelet version" kubeletVersion="v1.35.0"
I1229 06:56:16.400193 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045661 6517 server.go:527] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:16.400201 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045707 6517 watchdog_linux.go:95] "Systemd watchdog is not enabled"
I1229 06:56:16.400213 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045732 6517 watchdog_linux.go:138] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started."
I1229 06:56:16.400222 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.046147 6517 server.go:951] "Client rotation is on, will bootstrap in background"
I1229 06:56:16.400233 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.047668 6517 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem"
I1229 06:56:16.400243 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.050807 6517 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:56:16.400253 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.062385 6517 server.go:1418] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
I1229 06:56:16.400262 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066238 6517 server.go:775] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /"
I1229 06:56:16.400272 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066279 6517 server.go:836] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false
I1229 06:56:16.400281 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066453 6517 container_manager_linux.go:272] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
I1229 06:56:16.400693 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066538 6517 container_manager_linux.go:277] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"functional-695625","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
I1229 06:56:16.400713 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066683 6517 topology_manager.go:143] "Creating topology manager with none policy"
I1229 06:56:16.400724 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066691 6517 container_manager_linux.go:308] "Creating device plugin manager"
I1229 06:56:16.400734 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066715 6517 container_manager_linux.go:317] "Creating Dynamic Resource Allocation (DRA) manager"
I1229 06:56:16.400742 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066977 6517 state_mem.go:41] "Initialized" logger="CPUManager state memory"
I1229 06:56:16.400751 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067273 6517 kubelet.go:482] "Attempting to sync node with API server"
I1229 06:56:16.400760 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067303 6517 kubelet.go:383] "Adding static pod path" path="/etc/kubernetes/manifests"
I1229 06:56:16.400768 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067321 6517 kubelet.go:394] "Adding apiserver pod source"
I1229 06:56:16.400780 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067339 6517 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
I1229 06:56:16.400812 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.071645 6517 kuberuntime_manager.go:294] "Container runtime initialized" containerRuntime="docker" version="28.5.2" apiVersion="v1"
I1229 06:56:16.400833 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072532 6517 kubelet.go:943] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled"
I1229 06:56:16.400853 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072614 6517 kubelet.go:970] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled"
I1229 06:56:16.400868 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.079617 6517 server.go:1257] "Started kubelet"
I1229 06:56:16.400877 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.087576 6517 fs_resource_analyzer.go:69] "Starting FS ResourceAnalyzer"
I1229 06:56:16.400887 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.098777 6517 scope.go:122] "RemoveContainer" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:56:16.400896 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.106373 6517 server.go:182] "Starting to listen" address="0.0.0.0" port=10250
I1229 06:56:16.400903 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.109848 6517 server.go:317] "Adding debug handlers to kubelet server"
I1229 06:56:16.400915 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117444 6517 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
I1229 06:56:16.400924 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117555 6517 server_v1.go:49] "podresources" method="list" useActivePods=true
I1229 06:56:16.400936 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117716 6517 server.go:254] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
I1229 06:56:16.400950 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.118699 6517 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key"
I1229 06:56:16.400961 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119167 6517 volume_manager.go:311] "Starting Kubelet Volume Manager"
I1229 06:56:16.400972 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.119433 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:56:16.400985 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119972 6517 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
I1229 06:56:16.400993 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.120370 6517 reconciler.go:29] "Reconciler: start to sync state"
I1229 06:56:16.401003 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.130418 6517 factory.go:223] Registration of the systemd container factory successfully
I1229 06:56:16.401016 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.131188 6517 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory
I1229 06:56:16.401027 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.141029 6517 factory.go:223] Registration of the containerd container factory successfully
I1229 06:56:16.401036 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183036 6517 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4"
I1229 06:56:16.401045 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183220 6517 status_manager.go:249] "Starting to sync pod status with apiserver"
I1229 06:56:16.401053 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183330 6517 kubelet.go:2501] "Starting kubelet main sync loop"
I1229 06:56:16.401070 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.183444 6517 kubelet.go:2525] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
I1229 06:56:16.401083 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.221428 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:56:16.401100 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.242700 6517 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:56:16.401120 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.243294 6517 image_gc_manager.go:222] "Failed to monitor images" err="get container status: runtime container status: rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:56:16.401132 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269160 6517 cpu_manager.go:225] "Starting" policy="none"
I1229 06:56:16.401141 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269189 6517 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
I1229 06:56:16.401150 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269211 6517 state_mem.go:41] "Initialized" logger="CPUManager state checkpoint.CPUManager state memory"
I1229 06:56:16.401160 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269368 6517 state_mem.go:94] "Updated default CPUSet" logger="CPUManager state checkpoint.CPUManager state memory" cpuSet=""
I1229 06:56:16.401173 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269407 6517 state_mem.go:102] "Updated CPUSet assignments" logger="CPUManager state checkpoint.CPUManager state memory" assignments={}
I1229 06:56:16.401180 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269429 6517 policy_none.go:50] "Start"
I1229 06:56:16.401189 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269440 6517 memory_manager.go:187] "Starting memorymanager" policy="None"
I1229 06:56:16.401198 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269450 6517 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint"
I1229 06:56:16.401209 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269563 6517 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint"
I1229 06:56:16.401217 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.270193 6517 policy_none.go:44] "Start"
I1229 06:56:16.401228 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.280697 6517 manager.go:525] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
I1229 06:56:16.401415 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282209 6517 eviction_manager.go:194] "Eviction manager: starting control loop"
I1229 06:56:16.401435 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282399 6517 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
I1229 06:56:16.401444 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282694 6517 plugin_manager.go:121] "Starting Kubelet Plugin Manager"
I1229 06:56:16.401456 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.285700 6517 eviction_manager.go:272] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime"
I1229 06:56:16.401467 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.286000 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.401486 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.290189 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.401508 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.296210 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.401529 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296213 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:56:16.401553 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296423 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:56:16.401575 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296509 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:56:16.401589 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296522 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:56:16.401602 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296659 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:56:16.401614 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296736 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:56:16.401628 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.298291 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.401640 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.300783 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:56:16.401653 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.307864 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:56:16.401667 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327004 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:56:16.401679 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327039 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:56:16.401693 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.337430 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:56:16.401706 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338584 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:56:16.401720 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338603 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:56:16.401733 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.339318 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.401745 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.384315 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:56:16.401762 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.121079 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="200ms"
I1229 06:56:16.401816 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.286789 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.401840 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.288099 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.401871 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.322920 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="400ms"
I1229 06:56:16.401900 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.289381 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.401920 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.724518 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="800ms"
I1229 06:56:16.401958 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.088119 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bc22bb49a default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,LastTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:56:16.401977 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.387607 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:56:16.401987 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: I1229 06:54:34.589687 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:56:16.402002 17440 command_runner.go:130] > Dec 29 06:54:40 functional-695625 kubelet[6517]: E1229 06:54:40.289653 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402019 17440 command_runner.go:130] > Dec 29 06:54:41 functional-695625 kubelet[6517]: E1229 06:54:41.525961 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="1.6s"
I1229 06:56:16.402033 17440 command_runner.go:130] > Dec 29 06:54:50 functional-695625 kubelet[6517]: E1229 06:54:50.290623 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402048 17440 command_runner.go:130] > Dec 29 06:54:53 functional-695625 kubelet[6517]: E1229 06:54:53.127043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="3.2s"
I1229 06:56:16.402065 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.123055 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:56:16.402085 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223407 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-ca-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:56:16.402107 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223452 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-usr-share-ca-certificates\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:56:16.402134 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224254 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-flexvolume-dir\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.402169 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224286 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-k8s-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.402204 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224307 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/ebc0dd45a3bf1e20d1e524935fd6129c-kubeconfig\") pod \"kube-scheduler-functional-695625\" (UID: \"ebc0dd45a3bf1e20d1e524935fd6129c\") " pod="kube-system/kube-scheduler-functional-695625"
I1229 06:56:16.402228 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224328 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-k8s-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:56:16.402250 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224346 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-ca-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.402272 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224360 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-kubeconfig\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.402294 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224377 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-usr-share-ca-certificates\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:56:16.402314 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224432 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-certs\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:56:16.402335 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224449 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-data\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:56:16.402349 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.291332 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402367 17440 command_runner.go:130] > Dec 29 06:55:06 functional-695625 kubelet[6517]: E1229 06:55:06.329330 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="6.4s"
I1229 06:56:16.402405 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.090561 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:56:16.402421 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.592540 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:56:16.402433 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: I1229 06:55:08.994308 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:56:16.402444 17440 command_runner.go:130] > Dec 29 06:55:10 functional-695625 kubelet[6517]: E1229 06:55:10.291711 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402530 17440 command_runner.go:130] > Dec 29 06:55:20 functional-695625 kubelet[6517]: E1229 06:55:20.292793 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402557 17440 command_runner.go:130] > Dec 29 06:55:22 functional-695625 kubelet[6517]: E1229 06:55:22.729733 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:56:16.402569 17440 command_runner.go:130] > Dec 29 06:55:30 functional-695625 kubelet[6517]: E1229 06:55:30.293859 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402585 17440 command_runner.go:130] > Dec 29 06:55:39 functional-695625 kubelet[6517]: E1229 06:55:39.730496 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:56:16.402600 17440 command_runner.go:130] > Dec 29 06:55:40 functional-695625 kubelet[6517]: E1229 06:55:40.294978 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402639 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.093022 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc794297 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node functional-695625 status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,LastTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:56:16.402655 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.996721 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:56:16.402666 17440 command_runner.go:130] > Dec 29 06:55:43 functional-695625 kubelet[6517]: I1229 06:55:43.798535 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:56:16.402677 17440 command_runner.go:130] > Dec 29 06:55:50 functional-695625 kubelet[6517]: E1229 06:55:50.295990 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402697 17440 command_runner.go:130] > Dec 29 06:55:56 functional-695625 kubelet[6517]: E1229 06:55:56.732252 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:56:16.402714 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.228455 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:56:16.402726 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.296294 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.402737 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.339811 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:56:16.402752 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.590728 6517 scope.go:122] "RemoveContainer" containerID="d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974"
I1229 06:56:16.402917 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.596576 6517 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:56:16.402934 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.196928 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:56:16.402947 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199564 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.402959 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199638 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.402972 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.199656 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.402996 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199813 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.403011 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.211732 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403026 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.212086 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.403043 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226269 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403056 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226760 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.403070 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226846 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403082 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226932 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.403096 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240397 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403110 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.403125 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240759 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403138 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240798 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.403152 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241099 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403292 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241133 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.403310 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241440 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403325 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241482 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.403339 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: I1229 06:56:02.241498 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.403361 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241585 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.403376 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246390 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403389 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246454 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.403402 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246667 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403417 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246717 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.403428 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: I1229 06:56:03.246732 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.403450 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246832 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.403464 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.297136 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:56:16.403480 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342375 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403495 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.403506 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: I1229 06:56:10.342477 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.403636 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342670 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.403671 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593708 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403686 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593770 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.403702 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598591 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403720 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598652 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.403739 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606502 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403753 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606600 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:56:16.403767 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302101 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403780 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302675 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.403806 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302176 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403820 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302763 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.403833 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: I1229 06:56:11.302780 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.403850 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302307 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403871 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302816 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:56:16.403890 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.321043 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:56:16.403914 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.321965 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.403936 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322030 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.403952 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.322044 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:56:16.403976 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322163 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.403994 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323008 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.404007 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323148 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:56:16.404022 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336021 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:56:16.404034 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336097 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:56:16.404046 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: I1229 06:56:13.336114 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:56:16.404066 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336243 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:56:16.404085 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.733654 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:56:16.404122 17440 command_runner.go:130] > Dec 29 06:56:16 functional-695625 kubelet[6517]: E1229 06:56:16.095560 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc7b9ae7 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node functional-695625 status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,LastTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:56:16.454878 17440 logs.go:123] Gathering logs for kube-scheduler [4d49952084c9] ...
I1229 06:56:16.454917 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 4d49952084c9"
I1229 06:56:16.478085 17440 command_runner.go:130] ! I1229 06:53:52.882050 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:56:16.478126 17440 command_runner.go:130] ! W1229 06:54:52.896472 1 authentication.go:397] Error looking up in-cluster authentication configuration: the server was unable to return a response in the time allotted, but may still be processing the request (get configmaps extension-apiserver-authentication)
I1229 06:56:16.478136 17440 command_runner.go:130] ! W1229 06:54:52.896499 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
I1229 06:56:16.478148 17440 command_runner.go:130] ! W1229 06:54:52.896506 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1229 06:56:16.478155 17440 command_runner.go:130] ! I1229 06:54:52.913597 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.35.0"
I1229 06:56:16.478166 17440 command_runner.go:130] ! I1229 06:54:52.913622 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:16.478175 17440 command_runner.go:130] ! I1229 06:54:52.915784 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1229 06:56:16.478185 17440 command_runner.go:130] ! I1229 06:54:52.915816 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:56:16.478194 17440 command_runner.go:130] ! I1229 06:54:52.915823 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1229 06:56:16.478203 17440 command_runner.go:130] ! I1229 06:54:52.915940 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:56:16.478825 17440 logs.go:123] Gathering logs for kube-controller-manager [f48fc04e3475] ...
I1229 06:56:16.478843 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f48fc04e3475"
I1229 06:56:16.501568 17440 command_runner.go:130] ! I1229 06:56:01.090404 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:56:16.501592 17440 command_runner.go:130] ! I1229 06:56:01.103535 1 controllermanager.go:189] "Starting" version="v1.35.0"
I1229 06:56:16.501601 17440 command_runner.go:130] ! I1229 06:56:01.103787 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:16.501610 17440 command_runner.go:130] ! I1229 06:56:01.105458 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1229 06:56:16.501623 17440 command_runner.go:130] ! I1229 06:56:01.105665 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:56:16.501630 17440 command_runner.go:130] ! I1229 06:56:01.105907 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1229 06:56:16.501636 17440 command_runner.go:130] ! I1229 06:56:01.105924 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:56:16.501982 17440 logs.go:123] Gathering logs for kube-controller-manager [17fe16a2822a] ...
I1229 06:56:16.501996 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 17fe16a2822a"
I1229 06:56:16.524487 17440 command_runner.go:130] ! I1229 06:53:51.283329 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:56:16.524514 17440 command_runner.go:130] ! I1229 06:53:51.303666 1 controllermanager.go:189] "Starting" version="v1.35.0"
I1229 06:56:16.524523 17440 command_runner.go:130] ! I1229 06:53:51.303706 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:56:16.524767 17440 command_runner.go:130] ! I1229 06:53:51.307865 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1229 06:56:16.524788 17440 command_runner.go:130] ! I1229 06:53:51.308287 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:56:16.524805 17440 command_runner.go:130] ! I1229 06:53:51.309479 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1229 06:56:16.524812 17440 command_runner.go:130] ! I1229 06:53:51.309545 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:56:16.526406 17440 logs.go:123] Gathering logs for describe nodes ...
I1229 06:56:16.526437 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1229 06:57:16.604286 17440 command_runner.go:130] ! Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
I1229 06:57:16.606268 17440 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (1m0.079810784s)
W1229 06:57:16.606306 17440 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
output:
** stderr **
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
** /stderr **
I1229 06:57:16.606317 17440 logs.go:123] Gathering logs for kube-apiserver [18d0015c724a] ...
I1229 06:57:16.606331 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 18d0015c724a"
I1229 06:57:16.636305 17440 command_runner.go:130] ! Error response from daemon: No such container: 18d0015c724a
W1229 06:57:16.636367 17440 logs.go:130] failed kube-apiserver [18d0015c724a]: command: /bin/bash -c "docker logs --tail 400 18d0015c724a" /bin/bash -c "docker logs --tail 400 18d0015c724a": Process exited with status 1
stdout:
stderr:
Error response from daemon: No such container: 18d0015c724a
output:
** stderr **
Error response from daemon: No such container: 18d0015c724a
** /stderr **
I1229 06:57:16.636376 17440 logs.go:123] Gathering logs for kube-scheduler [a79d99ad3fde] ...
I1229 06:57:16.636391 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a79d99ad3fde"
I1229 06:57:16.657452 17440 command_runner.go:130] ! I1229 06:53:51.269699 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:57:19.160135 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:57:24.162053 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:57:24.162161 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-apiserver --format={{.ID}}
I1229 06:57:24.182182 17440 command_runner.go:130] > b206d555ad19
I1229 06:57:24.183367 17440 logs.go:282] 1 containers: [b206d555ad19]
I1229 06:57:24.183464 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_etcd --format={{.ID}}
I1229 06:57:24.206759 17440 command_runner.go:130] > 6b7711ee25a2
I1229 06:57:24.206821 17440 command_runner.go:130] > d81259f64136
I1229 06:57:24.206853 17440 logs.go:282] 2 containers: [6b7711ee25a2 d81259f64136]
I1229 06:57:24.206926 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_coredns --format={{.ID}}
I1229 06:57:24.228856 17440 command_runner.go:130] > 6f69ba6a1553
I1229 06:57:24.228897 17440 logs.go:282] 1 containers: [6f69ba6a1553]
I1229 06:57:24.228968 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-scheduler --format={{.ID}}
I1229 06:57:24.247867 17440 command_runner.go:130] > 4d49952084c9
I1229 06:57:24.247890 17440 command_runner.go:130] > a79d99ad3fde
I1229 06:57:24.249034 17440 logs.go:282] 2 containers: [4d49952084c9 a79d99ad3fde]
I1229 06:57:24.249130 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-proxy --format={{.ID}}
I1229 06:57:24.268209 17440 command_runner.go:130] > 8911777281f4
I1229 06:57:24.269160 17440 logs.go:282] 1 containers: [8911777281f4]
I1229 06:57:24.269243 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kube-controller-manager --format={{.ID}}
I1229 06:57:24.288837 17440 command_runner.go:130] > f48fc04e3475
I1229 06:57:24.288871 17440 command_runner.go:130] > 17fe16a2822a
I1229 06:57:24.290245 17440 logs.go:282] 2 containers: [f48fc04e3475 17fe16a2822a]
I1229 06:57:24.290337 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_kindnet --format={{.ID}}
I1229 06:57:24.312502 17440 logs.go:282] 0 containers: []
W1229 06:57:24.312531 17440 logs.go:284] No container was found matching "kindnet"
I1229 06:57:24.312592 17440 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_storage-provisioner --format={{.ID}}
I1229 06:57:24.334811 17440 command_runner.go:130] > bd96b57aa9fc
I1229 06:57:24.334849 17440 logs.go:282] 1 containers: [bd96b57aa9fc]
I1229 06:57:24.334875 17440 logs.go:123] Gathering logs for kube-apiserver [b206d555ad19] ...
I1229 06:57:24.334888 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 b206d555ad19"
I1229 06:57:24.357541 17440 command_runner.go:130] ! I1229 06:57:22.434262 1 options.go:263] external host was not specified, using 192.168.39.121
I1229 06:57:24.357567 17440 command_runner.go:130] ! I1229 06:57:22.436951 1 server.go:150] Version: v1.35.0
I1229 06:57:24.357577 17440 command_runner.go:130] ! I1229 06:57:22.436991 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:57:24.357602 17440 command_runner.go:130] ! E1229 06:57:22.441956 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
W1229 06:57:24.359181 17440 logs.go:138] Found kube-apiserver [b206d555ad19] problem: E1229 06:57:22.441956 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
I1229 06:57:24.359206 17440 logs.go:123] Gathering logs for kube-controller-manager [f48fc04e3475] ...
I1229 06:57:24.359218 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 f48fc04e3475"
I1229 06:57:24.381077 17440 command_runner.go:130] ! I1229 06:56:01.090404 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:57:24.381103 17440 command_runner.go:130] ! I1229 06:56:01.103535 1 controllermanager.go:189] "Starting" version="v1.35.0"
I1229 06:57:24.381113 17440 command_runner.go:130] ! I1229 06:56:01.103787 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:57:24.381121 17440 command_runner.go:130] ! I1229 06:56:01.105458 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1229 06:57:24.381131 17440 command_runner.go:130] ! I1229 06:56:01.105665 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:57:24.381137 17440 command_runner.go:130] ! I1229 06:56:01.105907 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1229 06:57:24.381144 17440 command_runner.go:130] ! I1229 06:56:01.105924 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:57:24.382680 17440 logs.go:123] Gathering logs for container status ...
I1229 06:57:24.382711 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1229 06:57:24.427354 17440 command_runner.go:130] > CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
I1229 06:57:24.427382 17440 command_runner.go:130] > b206d555ad194 5c6acd67e9cd1 2 seconds ago Exited kube-apiserver 5 d3819cc8ab802 kube-apiserver-functional-695625 kube-system
I1229 06:57:24.427400 17440 command_runner.go:130] > f48fc04e34751 2c9a4b058bd7e About a minute ago Running kube-controller-manager 2 0a96e34d38f8c kube-controller-manager-functional-695625 kube-system
I1229 06:57:24.427411 17440 command_runner.go:130] > 6b7711ee25a2d 0a108f7189562 About a minute ago Running etcd 2 173054afc2f39 etcd-functional-695625 kube-system
I1229 06:57:24.427421 17440 command_runner.go:130] > 4d49952084c92 550794e3b12ac 3 minutes ago Running kube-scheduler 2 fefef7c5591ea kube-scheduler-functional-695625 kube-system
I1229 06:57:24.427441 17440 command_runner.go:130] > 6f69ba6a1553a aa5e3ebc0dfed 3 minutes ago Exited coredns 1 a014f32abcd01 coredns-7d764666f9-wfq7m kube-system
I1229 06:57:24.427454 17440 command_runner.go:130] > d81259f64136c 0a108f7189562 3 minutes ago Exited etcd 1 1fc5fa7d92959 etcd-functional-695625 kube-system
I1229 06:57:24.427465 17440 command_runner.go:130] > 17fe16a2822a8 2c9a4b058bd7e 3 minutes ago Exited kube-controller-manager 1 98261fa185f6e kube-controller-manager-functional-695625 kube-system
I1229 06:57:24.427477 17440 command_runner.go:130] > a79d99ad3fde3 550794e3b12ac 3 minutes ago Exited kube-scheduler 1 b046056ff071b kube-scheduler-functional-695625 kube-system
I1229 06:57:24.427488 17440 command_runner.go:130] > bd96b57aa9fce 6e38f40d628db 4 minutes ago Exited storage-provisioner 0 64853b50a6c5e storage-provisioner kube-system
I1229 06:57:24.427509 17440 command_runner.go:130] > 8911777281f41 32652ff1bbe6b 4 minutes ago Exited kube-proxy 0 548561c7ada8f kube-proxy-g7lp9 kube-system
I1229 06:57:24.430056 17440 logs.go:123] Gathering logs for kubelet ...
I1229 06:57:24.430095 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1229 06:57:24.453665 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239338 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453712 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.239383 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453738 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244411 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:57:24.453770 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.244504 2634 container_log_manager.go:154] "Failed to rotate container logs" err="failed to list containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453809 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458139 2634 log.go:32] "ListContainers with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter=""
I1229 06:57:24.453838 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.458218 2634 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to list pod stats: failed to get pod or container map: failed to list all containers: rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453867 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926377 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.453891 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926435 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453911 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.926447 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453928 17440 command_runner.go:130] > Dec 29 06:53:31 functional-695625 kubelet[2634]: E1229 06:53:31.994121 2634 log.go:32] "Version from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453945 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927827 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.453961 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927867 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.453974 17440 command_runner.go:130] > Dec 29 06:53:32 functional-695625 kubelet[2634]: E1229 06:53:32.927930 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454002 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140553 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:57:24.454022 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140635 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454040 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140653 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454058 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.140664 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454074 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930020 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454087 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930083 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454103 17440 command_runner.go:130] > Dec 29 06:53:33 functional-695625 kubelet[2634]: E1229 06:53:33.930129 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454120 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932311 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454135 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932363 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454149 17440 command_runner.go:130] > Dec 29 06:53:34 functional-695625 kubelet[2634]: E1229 06:53:34.932375 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454165 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140618 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:57:24.454179 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140679 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454194 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140697 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454208 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.140709 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454224 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933321 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454246 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933382 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454262 17440 command_runner.go:130] > Dec 29 06:53:35 functional-695625 kubelet[2634]: E1229 06:53:35.933393 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454276 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241324 2634 log.go:32] "Status from runtime service failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454294 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.241391 2634 kubelet.go:3115] "Container runtime sanity check failed" err="rpc error: code = Unknown desc = failed to get docker info from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454310 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935649 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454326 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935930 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454342 17440 command_runner.go:130] > Dec 29 06:53:36 functional-695625 kubelet[2634]: E1229 06:53:36.935948 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454358 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140389 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:57:24.454371 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140507 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454386 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140525 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454401 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.140536 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454423 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937258 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454447 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937350 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454472 17440 command_runner.go:130] > Dec 29 06:53:37 functional-695625 kubelet[2634]: E1229 06:53:37.937364 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454500 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939069 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454519 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939129 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454533 17440 command_runner.go:130] > Dec 29 06:53:38 functional-695625 kubelet[2634]: E1229 06:53:38.939141 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454549 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139354 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="state:{}"
I1229 06:57:24.454565 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139413 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454579 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139428 2634 kubelet_pods.go:1263] "Error listing containers" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454593 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.139440 2634 kubelet.go:2687] "Failed cleaning pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454608 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941237 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?" filter="<nil>"
I1229 06:57:24.454625 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941285 2634 kuberuntime_sandbox.go:297] "Failed to list pod sandboxes" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454640 17440 command_runner.go:130] > Dec 29 06:53:39 functional-695625 kubelet[2634]: E1229 06:53:39.941296 2634 generic.go:254] "GenericPLEG: Unable to retrieve pods" err="rpc error: code = Unknown desc = Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.454655 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.109014 2634 kubelet.go:2691] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="6.97s"
I1229 06:57:24.454667 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.125762 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:57:24.454680 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.129855 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.454697 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.131487 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.454714 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.140438 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:57:24.454729 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.141703 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:57:24.454741 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.164789 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:57:24.454816 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.190793 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:57:24.454842 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202029 2634 kuberuntime_manager.go:1961] "PodSandboxStatus of sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd" pod="kube-system/etcd-functional-695625"
I1229 06:57:24.454855 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202077 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:57:24.454870 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202095 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:57:24.454881 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202348 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:57:24.454896 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202382 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:57:24.454912 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202394 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:57:24.454940 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202436 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-proxy-g7lp9" podUID="9c2c2ac1-7fa0-427d-b78e-ee14e169895a"
I1229 06:57:24.454957 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202695 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:57:24.454969 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202717 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"}
I1229 06:57:24.454987 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202737 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:57:24.455012 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202753 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/storage-provisioner" podUID="b5f201ca-6d54-4e15-9584-396fb1486f3c"
I1229 06:57:24.455025 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202781 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:57:24.455039 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202797 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:57:24.455055 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202829 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:57:24.455081 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203153 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.455097 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203201 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:57:24.455110 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203215 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"}
I1229 06:57:24.455125 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203229 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:57:24.455144 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203240 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:57:24.455165 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.205108 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer"
I1229 06:57:24.455186 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205291 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"ebc0dd45a3bf1e20d1e524935fd6129c\"}"
I1229 06:57:24.455204 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205358 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="ebc0dd45a3bf1e20d1e524935fd6129c"
I1229 06:57:24.455224 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205374 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:57:24.455243 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205391 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:57:24.455275 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205442 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"5079d003096e0cf8214852718da6832c\"}"
I1229 06:57:24.455294 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205472 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:57:24.455310 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205487 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.455326 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205502 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.455345 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.306369 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:57:24.455366 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.465709 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:57:24.455386 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.727775 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:57:24.455404 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.224724 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:57:24.455423 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.225054 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.455446 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.239349 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:57:24.455472 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.239613 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.455490 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.260924 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:57:24.455506 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.262706 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.455528 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.271403 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:57:24.455550 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.272071 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.455573 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.486082 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.455588 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.527267 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.455603 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.585714 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.455615 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.682419 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:57:24.455628 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 kubelet[2634]: E1229 06:53:51.994421 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.455640 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.009282 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.455657 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.028514 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.455669 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: I1229 06:53:52.059063 2634 scope.go:122] "RemoveContainer" containerID="4b032678478a0db80f17dd1d989d5d3ad03f5c19d261d887ee8bbc80c0ef716c"
I1229 06:57:24.455681 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.061268 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.455699 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.136206 2634 controller.go:251] "Failed to update lease" err="Put \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:57:24.455720 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.348866 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:57:24.455739 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.420977 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.455750 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.083455 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:57:24.455810 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099631 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:57:24.455823 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099665 2634 scope.go:122] "RemoveContainer" containerID="14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697"
I1229 06:57:24.455835 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.099823 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.455848 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.114949 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:57:24.455860 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115125 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.455872 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.115147 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:57:24.455892 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115570 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:57:24.455904 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128002 2634 scope.go:122] "RemoveContainer" containerID="abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32"
I1229 06:57:24.455916 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128620 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:57:24.455930 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.128846 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.455967 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128862 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.455990 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.129184 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.456008 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.146245 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:57:24.456019 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.149274 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:57:24.456031 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.158968 2634 scope.go:122] "RemoveContainer" containerID="bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159"
I1229 06:57:24.456052 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.483523 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:57:24.456067 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.165031 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.456078 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.165425 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:57:24.456100 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.166088 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:57:24.456114 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.177787 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:57:24.456124 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.177811 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:57:24.456144 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.178010 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:57:24.456159 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190233 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.456169 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.190259 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.456191 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190388 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.456205 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190596 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.456216 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.197650 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.456229 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198541 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:57:24.456239 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: I1229 06:53:55.198579 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:57:24.456260 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198854 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:57:24.456304 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.114313 2634 kubelet_node_status.go:474] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"containerRuntimeVersion\\\":\\\"docker://28.5.2\\\"}}}\" for node \"functional-695625\": Patch \"https://192.168.39.121:8441/api/v1/nodes/functional-695625/status?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:57:24.456318 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.650698 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.456331 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.650771 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.456352 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.651066 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.456364 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.808551 2634 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:57:24.456372 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
I1229 06:57:24.456379 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
I1229 06:57:24.456386 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
I1229 06:57:24.456396 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Consumed 2.468s CPU time, 33.6M memory peak.
I1229 06:57:24.456406 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 systemd[1]: Started kubelet: The Kubernetes Node Agent.
I1229 06:57:24.456423 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045324 6517 server.go:525] "Kubelet version" kubeletVersion="v1.35.0"
I1229 06:57:24.456441 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045661 6517 server.go:527] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:57:24.456458 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045707 6517 watchdog_linux.go:95] "Systemd watchdog is not enabled"
I1229 06:57:24.456472 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045732 6517 watchdog_linux.go:138] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started."
I1229 06:57:24.456487 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.046147 6517 server.go:951] "Client rotation is on, will bootstrap in background"
I1229 06:57:24.456503 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.047668 6517 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem"
I1229 06:57:24.456520 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.050807 6517 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:57:24.456540 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.062385 6517 server.go:1418] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
I1229 06:57:24.456560 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066238 6517 server.go:775] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /"
I1229 06:57:24.456573 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066279 6517 server.go:836] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false
I1229 06:57:24.456584 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066453 6517 container_manager_linux.go:272] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
I1229 06:57:24.456626 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066538 6517 container_manager_linux.go:277] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"functional-695625","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
I1229 06:57:24.456639 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066683 6517 topology_manager.go:143] "Creating topology manager with none policy"
I1229 06:57:24.456647 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066691 6517 container_manager_linux.go:308] "Creating device plugin manager"
I1229 06:57:24.456657 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066715 6517 container_manager_linux.go:317] "Creating Dynamic Resource Allocation (DRA) manager"
I1229 06:57:24.456665 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066977 6517 state_mem.go:41] "Initialized" logger="CPUManager state memory"
I1229 06:57:24.456676 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067273 6517 kubelet.go:482] "Attempting to sync node with API server"
I1229 06:57:24.456685 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067303 6517 kubelet.go:383] "Adding static pod path" path="/etc/kubernetes/manifests"
I1229 06:57:24.456695 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067321 6517 kubelet.go:394] "Adding apiserver pod source"
I1229 06:57:24.456703 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067339 6517 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
I1229 06:57:24.456714 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.071645 6517 kuberuntime_manager.go:294] "Container runtime initialized" containerRuntime="docker" version="28.5.2" apiVersion="v1"
I1229 06:57:24.456726 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072532 6517 kubelet.go:943] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled"
I1229 06:57:24.456739 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072614 6517 kubelet.go:970] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled"
I1229 06:57:24.456748 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.079617 6517 server.go:1257] "Started kubelet"
I1229 06:57:24.456761 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.087576 6517 fs_resource_analyzer.go:69] "Starting FS ResourceAnalyzer"
I1229 06:57:24.456771 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.098777 6517 scope.go:122] "RemoveContainer" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.456782 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.106373 6517 server.go:182] "Starting to listen" address="0.0.0.0" port=10250
I1229 06:57:24.456790 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.109848 6517 server.go:317] "Adding debug handlers to kubelet server"
I1229 06:57:24.456811 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117444 6517 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
I1229 06:57:24.456821 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117555 6517 server_v1.go:49] "podresources" method="list" useActivePods=true
I1229 06:57:24.456832 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117716 6517 server.go:254] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
I1229 06:57:24.456845 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.118699 6517 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key"
I1229 06:57:24.456853 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119167 6517 volume_manager.go:311] "Starting Kubelet Volume Manager"
I1229 06:57:24.456866 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.119433 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:57:24.456875 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119972 6517 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
I1229 06:57:24.456885 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.120370 6517 reconciler.go:29] "Reconciler: start to sync state"
I1229 06:57:24.456893 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.130418 6517 factory.go:223] Registration of the systemd container factory successfully
I1229 06:57:24.456907 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.131188 6517 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory
I1229 06:57:24.456918 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.141029 6517 factory.go:223] Registration of the containerd container factory successfully
I1229 06:57:24.456927 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183036 6517 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4"
I1229 06:57:24.456937 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183220 6517 status_manager.go:249] "Starting to sync pod status with apiserver"
I1229 06:57:24.456947 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183330 6517 kubelet.go:2501] "Starting kubelet main sync loop"
I1229 06:57:24.456959 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.183444 6517 kubelet.go:2525] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
I1229 06:57:24.456971 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.221428 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:57:24.456990 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.242700 6517 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.457011 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.243294 6517 image_gc_manager.go:222] "Failed to monitor images" err="get container status: runtime container status: rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.457023 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269160 6517 cpu_manager.go:225] "Starting" policy="none"
I1229 06:57:24.457032 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269189 6517 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
I1229 06:57:24.457044 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269211 6517 state_mem.go:41] "Initialized" logger="CPUManager state checkpoint.CPUManager state memory"
I1229 06:57:24.457054 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269368 6517 state_mem.go:94] "Updated default CPUSet" logger="CPUManager state checkpoint.CPUManager state memory" cpuSet=""
I1229 06:57:24.457067 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269407 6517 state_mem.go:102] "Updated CPUSet assignments" logger="CPUManager state checkpoint.CPUManager state memory" assignments={}
I1229 06:57:24.457074 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269429 6517 policy_none.go:50] "Start"
I1229 06:57:24.457083 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269440 6517 memory_manager.go:187] "Starting memorymanager" policy="None"
I1229 06:57:24.457093 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269450 6517 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint"
I1229 06:57:24.457105 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269563 6517 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint"
I1229 06:57:24.457112 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.270193 6517 policy_none.go:44] "Start"
I1229 06:57:24.457125 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.280697 6517 manager.go:525] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
I1229 06:57:24.457133 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282209 6517 eviction_manager.go:194] "Eviction manager: starting control loop"
I1229 06:57:24.457145 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282399 6517 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
I1229 06:57:24.457154 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282694 6517 plugin_manager.go:121] "Starting Kubelet Plugin Manager"
I1229 06:57:24.457168 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.285700 6517 eviction_manager.go:272] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime"
I1229 06:57:24.457178 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.286000 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457192 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.290189 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.457205 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.296210 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.457220 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296213 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:57:24.457235 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296423 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:57:24.457247 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296509 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:57:24.457258 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296522 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:57:24.457271 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296659 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:57:24.457284 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296736 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:57:24.457299 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.298291 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.457310 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.300783 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:57:24.457322 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.307864 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:57:24.457333 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327004 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:57:24.457345 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327039 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:57:24.457359 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.337430 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:57:24.457370 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338584 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:57:24.457381 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338603 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:57:24.457396 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.339318 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.457410 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.384315 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.457436 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.121079 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="200ms"
I1229 06:57:24.457460 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.286789 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457481 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.288099 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457500 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.322920 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="400ms"
I1229 06:57:24.457515 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.289381 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457533 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.724518 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="800ms"
I1229 06:57:24.457586 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.088119 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bc22bb49a default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,LastTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.457604 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.387607 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:57:24.457613 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: I1229 06:54:34.589687 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.457633 17440 command_runner.go:130] > Dec 29 06:54:40 functional-695625 kubelet[6517]: E1229 06:54:40.289653 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457649 17440 command_runner.go:130] > Dec 29 06:54:41 functional-695625 kubelet[6517]: E1229 06:54:41.525961 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="1.6s"
I1229 06:57:24.457664 17440 command_runner.go:130] > Dec 29 06:54:50 functional-695625 kubelet[6517]: E1229 06:54:50.290623 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.457680 17440 command_runner.go:130] > Dec 29 06:54:53 functional-695625 kubelet[6517]: E1229 06:54:53.127043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="3.2s"
I1229 06:57:24.457697 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.123055 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:57:24.457717 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223407 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-ca-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:57:24.457740 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223452 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-usr-share-ca-certificates\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:57:24.457763 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224254 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-flexvolume-dir\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.457785 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224286 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-k8s-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.457817 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224307 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/ebc0dd45a3bf1e20d1e524935fd6129c-kubeconfig\") pod \"kube-scheduler-functional-695625\" (UID: \"ebc0dd45a3bf1e20d1e524935fd6129c\") " pod="kube-system/kube-scheduler-functional-695625"
I1229 06:57:24.457904 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224328 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-k8s-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:57:24.457927 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224346 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-ca-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.457948 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224360 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-kubeconfig\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.457976 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224377 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-usr-share-ca-certificates\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:57:24.457996 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224432 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-certs\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:57:24.458019 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224449 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-data\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:57:24.458034 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.291332 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458050 17440 command_runner.go:130] > Dec 29 06:55:06 functional-695625 kubelet[6517]: E1229 06:55:06.329330 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="6.4s"
I1229 06:57:24.458090 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.090561 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.458106 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.592540 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:57:24.458116 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: I1229 06:55:08.994308 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.458130 17440 command_runner.go:130] > Dec 29 06:55:10 functional-695625 kubelet[6517]: E1229 06:55:10.291711 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458141 17440 command_runner.go:130] > Dec 29 06:55:20 functional-695625 kubelet[6517]: E1229 06:55:20.292793 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458158 17440 command_runner.go:130] > Dec 29 06:55:22 functional-695625 kubelet[6517]: E1229 06:55:22.729733 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:57:24.458170 17440 command_runner.go:130] > Dec 29 06:55:30 functional-695625 kubelet[6517]: E1229 06:55:30.293859 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458184 17440 command_runner.go:130] > Dec 29 06:55:39 functional-695625 kubelet[6517]: E1229 06:55:39.730496 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:57:24.458198 17440 command_runner.go:130] > Dec 29 06:55:40 functional-695625 kubelet[6517]: E1229 06:55:40.294978 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458263 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.093022 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc794297 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node functional-695625 status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,LastTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.458295 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.996721 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:57:24.458316 17440 command_runner.go:130] > Dec 29 06:55:43 functional-695625 kubelet[6517]: I1229 06:55:43.798535 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.458339 17440 command_runner.go:130] > Dec 29 06:55:50 functional-695625 kubelet[6517]: E1229 06:55:50.295990 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458367 17440 command_runner.go:130] > Dec 29 06:55:56 functional-695625 kubelet[6517]: E1229 06:55:56.732252 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:57:24.458389 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.228455 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:57:24.458409 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.296294 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458429 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.339811 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.458447 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.590728 6517 scope.go:122] "RemoveContainer" containerID="d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974"
I1229 06:57:24.458468 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.596576 6517 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:57:24.458490 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.196928 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.458512 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199564 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458529 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199638 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.458542 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.199656 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.458572 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199813 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.458587 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.211732 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458602 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.212086 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.458617 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226269 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458632 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226760 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.458644 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226846 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458659 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226932 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.458674 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240397 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458686 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.458702 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240759 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458717 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240798 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.458732 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241099 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458746 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241133 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.458762 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241440 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458777 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241482 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.458790 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: I1229 06:56:02.241498 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.458824 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241585 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.458839 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246390 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458852 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246454 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.458865 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246667 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458879 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246717 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.458889 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: I1229 06:56:03.246732 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.458911 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246832 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.458925 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.297136 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.458939 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342375 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.458952 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.458964 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: I1229 06:56:10.342477 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.458983 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342670 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.458998 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593708 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459016 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593770 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.459031 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598591 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459048 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598652 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.459062 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606502 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459076 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606600 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.459090 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302101 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459104 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302675 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.459118 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302176 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459132 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302763 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459145 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: I1229 06:56:11.302780 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.459158 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302307 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459174 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302816 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:57:24.459186 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.321043 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:57:24.459201 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.321965 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459215 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322030 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459225 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.322044 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459247 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322163 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459261 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323008 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459274 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323148 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:57:24.459286 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336021 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459302 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336097 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459314 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: I1229 06:56:13.336114 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459334 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336243 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459352 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.733654 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:57:24.459392 17440 command_runner.go:130] > Dec 29 06:56:16 functional-695625 kubelet[6517]: E1229 06:56:16.095560 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc7b9ae7 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node functional-695625 status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,LastTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.459418 17440 command_runner.go:130] > Dec 29 06:56:17 functional-695625 kubelet[6517]: E1229 06:56:17.801052 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:57:24.459438 17440 command_runner.go:130] > Dec 29 06:56:19 functional-695625 kubelet[6517]: I1229 06:56:19.403026 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.459461 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.297746 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.459483 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342467 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459502 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342554 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459515 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: I1229 06:56:20.342589 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459537 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342829 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459552 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.385984 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459567 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.386062 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459579 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: I1229 06:56:20.386078 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459599 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.386220 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459613 17440 command_runner.go:130] > Dec 29 06:56:30 functional-695625 kubelet[6517]: E1229 06:56:30.298955 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.459634 17440 command_runner.go:130] > Dec 29 06:56:30 functional-695625 kubelet[6517]: E1229 06:56:30.734998 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:57:24.459649 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.185639 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459662 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.185732 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459676 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: I1229 06:56:32.185750 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459693 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493651 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459707 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493733 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459720 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: I1229 06:56:32.493755 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.459741 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493996 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459753 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: I1229 06:56:33.510294 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:57:24.459769 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511464 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459782 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511520 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459806 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: I1229 06:56:33.511535 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.459829 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511684 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459845 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525404 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459859 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525467 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459875 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: I1229 06:56:34.525482 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.459897 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525663 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459911 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.300040 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.459924 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342011 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.459938 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342082 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.459950 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: I1229 06:56:40.342099 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.459972 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342223 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.459987 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.567456 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.460000 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.567665 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.460016 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: I1229 06:56:40.567686 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.460036 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.568152 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.460053 17440 command_runner.go:130] > Dec 29 06:56:47 functional-695625 kubelet[6517]: E1229 06:56:47.736964 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:57:24.460094 17440 command_runner.go:130] > Dec 29 06:56:50 functional-695625 kubelet[6517]: E1229 06:56:50.098168 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.27202431 +0000 UTC m=+0.287773690,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.460108 17440 command_runner.go:130] > Dec 29 06:56:50 functional-695625 kubelet[6517]: E1229 06:56:50.300747 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.460124 17440 command_runner.go:130] > Dec 29 06:56:53 functional-695625 kubelet[6517]: E1229 06:56:53.405155 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:57:24.460136 17440 command_runner.go:130] > Dec 29 06:56:56 functional-695625 kubelet[6517]: I1229 06:56:56.606176 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:57:24.460148 17440 command_runner.go:130] > Dec 29 06:57:00 functional-695625 kubelet[6517]: E1229 06:57:00.301915 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.460162 17440 command_runner.go:130] > Dec 29 06:57:00 functional-695625 kubelet[6517]: E1229 06:57:00.330173 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:57:24.460182 17440 command_runner.go:130] > Dec 29 06:57:04 functional-695625 kubelet[6517]: E1229 06:57:04.738681 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:57:24.460195 17440 command_runner.go:130] > Dec 29 06:57:10 functional-695625 kubelet[6517]: E1229 06:57:10.302083 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.460206 17440 command_runner.go:130] > Dec 29 06:57:20 functional-695625 kubelet[6517]: E1229 06:57:20.302612 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:57:24.460221 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.185645 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.460236 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.185704 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:57:24.460254 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.740062 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:57:24.460269 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.185952 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.460283 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.186017 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.460296 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.186034 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.460308 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.873051 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:57:24.460321 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874264 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.460334 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874357 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.460347 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.874375 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:57:24.460367 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874499 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.460381 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892021 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:57:24.460395 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892083 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:57:24.460414 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: I1229 06:57:23.892098 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:57:24.460450 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892218 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:57:24.460499 17440 command_runner.go:130] > Dec 29 06:57:24 functional-695625 kubelet[6517]: E1229 06:57:24.100978 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc794297 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node functional-695625 status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,LastTimestamp:2025-12-29 06:54:00.27223373 +0000 UTC m=+0.287983111,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:57:24.513870 17440 logs.go:123] Gathering logs for etcd [d81259f64136] ...
I1229 06:57:24.513913 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 d81259f64136"
I1229 06:57:24.542868 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517725Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:57:24.542904 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.517828Z","caller":"etcdmain/config.go:270","msg":"--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7."}
I1229 06:57:24.542974 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.517848Z","caller":"etcdmain/etcd.go:64","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.39.121:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--feature-gates=InitialCorruptCheck=true","--initial-advertise-peer-urls=https://192.168.39.121:2380","--initial-cluster=functional-695625=https://192.168.39.121:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.39.121:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.39.121:2380","--name=functional-695625","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--snapshot-count=10000","--trusted-ca-file=/var/lib
/minikube/certs/etcd/ca.crt","--watch-progress-notify-interval=5s"]}
I1229 06:57:24.542992 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519323Z","caller":"etcdmain/etcd.go:107","msg":"server has already been initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
I1229 06:57:24.543020 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.519372Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:57:24.543037 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.519700Z","caller":"embed/etcd.go:138","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.39.121:2380"]}
I1229 06:57:24.543067 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.522332Z","caller":"embed/etcd.go:544","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:57:24.543085 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.530852Z","caller":"embed/etcd.go:146","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"]}
I1229 06:57:24.543199 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.531312Z","caller":"embed/etcd.go:323","msg":"starting an etcd server","etcd-version":"3.6.6","git-sha":"d2809cf","go-version":"go1.24.10","go-os":"linux","go-arch":"amd64","max-cpu-set":2,"max-cpu-available":2,"member-initialized":true,"name":"functional-695625","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"experimental-local-address":"","cors":["
*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"feature-gates":"InitialCorruptCheck=true","initial-corrupt-check":false,"corrupt-check-time-interval":"0s","compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","discovery-token":"","discovery-endpoints":"","discovery-dial-timeout":"2s","discovery-request-timeout":"5s","discovery-keepalive-time":"2s","discovery-keepalive-timeout":"6s","discovery-insecure-transport":true,"discovery-insecure-skip-tls-verify":false,"discovery-cert":"","discovery-key":"","discovery-cacert":"","discovery-user":"","downgrade-check-interval":"5s","max-learners":1,"v2-deprecation":"write-only"}
I1229 06:57:24.543237 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.533505Z","logger":"bbolt","caller":"backend/backend.go:203","msg":"Opening db file (/var/lib/minikube/etcd/member/snap/db) with mode -rw------- and with options: {Timeout: 0s, NoGrowSync: false, NoFreelistSync: true, PreLoadFreelist: false, FreelistType: hashmap, ReadOnly: false, MmapFlags: 8000, InitialMmapSize: 10737418240, PageSize: 0, NoSync: false, OpenFile: 0x0, Mlock: false, Logger: 0xc00006a930}"}
I1229 06:57:24.543258 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.562961Z","logger":"bbolt","caller":"bbolt@v1.4.3/db.go:321","msg":"Opening bbolt db (/var/lib/minikube/etcd/member/snap/db) successfully"}
I1229 06:57:24.543276 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.566967Z","caller":"storage/backend.go:80","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"33.344174ms"}
I1229 06:57:24.543291 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.569353Z","caller":"etcdserver/bootstrap.go:220","msg":"restore consistentIndex","index":497}
I1229 06:57:24.543306 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596637Z","caller":"etcdserver/bootstrap.go:441","msg":"No snapshot found. Recovering WAL from scratch!"}
I1229 06:57:24.543327 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596694Z","caller":"etcdserver/bootstrap.go:232","msg":"recovered v3 backend","backend-size-bytes":1019904,"backend-size":"1.0 MB","backend-size-in-use-bytes":999424,"backend-size-in-use":"999 kB"}
I1229 06:57:24.543344 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.596795Z","caller":"etcdserver/bootstrap.go:90","msg":"Bootstrapping WAL from snapshot"}
I1229 06:57:24.543365 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.620855Z","caller":"etcdserver/bootstrap.go:599","msg":"restarting local member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","commit-index":497}
I1229 06:57:24.543380 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.621587Z","caller":"etcdserver/bootstrap.go:94","msg":"bootstrapping cluster"}
I1229 06:57:24.543393 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624518Z","caller":"etcdserver/bootstrap.go:101","msg":"bootstrapping storage"}
I1229 06:57:24.543419 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624664Z","caller":"membership/cluster.go:605","msg":"Detected member only in v3store but missing in v2store","member":"{ID:cbdf275f553df7c2 RaftAttributes:{PeerURLs:[https://192.168.39.121:2380] IsLearner:false} Attributes:{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}}"}
I1229 06:57:24.543437 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624700Z","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.6"}
I1229 06:57:24.543464 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624712Z","caller":"membership/cluster.go:297","msg":"recovered/added member from store","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","recovered-remote-peer-id":"cbdf275f553df7c2","recovered-remote-peer-urls":["https://192.168.39.121:2380"],"recovered-remote-peer-is-learner":false}
I1229 06:57:24.543483 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624720Z","caller":"membership/cluster.go:307","msg":"set cluster version from store","cluster-version":"3.6"}
I1229 06:57:24.543499 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624728Z","caller":"etcdserver/bootstrap.go:109","msg":"bootstrapping raft"}
I1229 06:57:24.543511 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624764Z","caller":"etcdserver/server.go:312","msg":"bootstrap successfully"}
I1229 06:57:24.543561 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624799Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=()"}
I1229 06:57:24.543585 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624856Z","logger":"raft","caller":"v3@v3.6.0/raft.go:897","msg":"cbdf275f553df7c2 became follower at term 2"}
I1229 06:57:24.543605 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.624867Z","logger":"raft","caller":"v3@v3.6.0/raft.go:493","msg":"newRaft cbdf275f553df7c2 [peers: [], term: 2, commit: 497, applied: 0, lastindex: 497, lastterm: 2]"}
I1229 06:57:24.543623 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:50.634002Z","caller":"auth/store.go:1135","msg":"simple token is not cryptographically signed"}
I1229 06:57:24.543659 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.644772Z","caller":"mvcc/kvstore.go:408","msg":"kvstore restored","current-rev":465}
I1229 06:57:24.543680 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.681530Z","caller":"storage/quota.go:93","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
I1229 06:57:24.543701 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686046Z","caller":"etcdserver/corrupt.go:91","msg":"starting initial corruption check","local-member-id":"cbdf275f553df7c2","timeout":"7s"}
I1229 06:57:24.543722 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686350Z","caller":"etcdserver/corrupt.go:172","msg":"initial corruption checking passed; no corruption","local-member-id":"cbdf275f553df7c2"}
I1229 06:57:24.543744 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.686391Z","caller":"etcdserver/server.go:589","msg":"starting etcd server","local-member-id":"cbdf275f553df7c2","local-server-version":"3.6.6","cluster-id":"6f38b6947d3f1f22","cluster-version":"3.6"}
I1229 06:57:24.543770 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687141Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:57:24.543821 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687399Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cbdf275f553df7c2","initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
I1229 06:57:24.543840 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687425Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
I1229 06:57:24.543865 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687475Z","caller":"etcdserver/server.go:483","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"cbdf275f553df7c2","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
I1229 06:57:24.543886 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687536Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
I1229 06:57:24.543908 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687564Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
I1229 06:57:24.543927 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687571Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
I1229 06:57:24.543945 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687702Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:57:24.543962 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.687713Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.121:2380"}
I1229 06:57:24.543980 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692750Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=(14690503799911348162)"}
I1229 06:57:24.544010 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.692847Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","added-peer-id":"cbdf275f553df7c2","added-peer-peer-urls":["https://192.168.39.121:2380"],"added-peer-is-learner":false}
I1229 06:57:24.544031 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.694703Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","from":"3.6","to":"3.6"}
I1229 06:57:24.544065 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830725Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cbdf275f553df7c2 is starting a new election at term 2"}
I1229 06:57:24.544084 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830765Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cbdf275f553df7c2 became pre-candidate at term 2"}
I1229 06:57:24.544103 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830844Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgPreVoteResp from cbdf275f553df7c2 at term 2"}
I1229 06:57:24.544120 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830858Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
I1229 06:57:24.544136 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.830936Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cbdf275f553df7c2 became candidate at term 3"}
I1229 06:57:24.544157 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832101Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgVoteResp from cbdf275f553df7c2 at term 3"}
I1229 06:57:24.544176 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832130Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgVoteResp votes and 0 vote rejections"}
I1229 06:57:24.544193 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832148Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cbdf275f553df7c2 became leader at term 3"}
I1229 06:57:24.544213 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.832166Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cbdf275f553df7c2 elected leader cbdf275f553df7c2 at term 3"}
I1229 06:57:24.544224 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835446Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:57:24.544248 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.835384Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cbdf275f553df7c2","local-member-attributes":"{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}","cluster-id":"6f38b6947d3f1f22","publish-timeout":"7s"}
I1229 06:57:24.544264 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839145Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:57:24.544283 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.839733Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:57:24.544298 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851017Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
I1229 06:57:24.544314 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851067Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
I1229 06:57:24.544331 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.851748Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:57:24.544345 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.856729Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
I1229 06:57:24.544364 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:50.869216Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.121:2379"}
I1229 06:57:24.544381 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706108Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
I1229 06:57:24.544405 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:51.706269Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:57:24.544430 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:51.706381Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:57:24.544465 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.707655Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
I1229 06:57:24.544517 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.709799Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:57:24.544537 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.709913Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"cbdf275f553df7c2","current-leader-member-id":"cbdf275f553df7c2"}
I1229 06:57:24.544554 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710255Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
I1229 06:57:24.544575 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710690Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:57:24.544595 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.710782Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
I1229 06:57:24.544623 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.710832Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:57:24.544641 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.710742Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
I1229 06:57:24.544662 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711002Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:57:24.544683 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:53:58.711035Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.39.121:2379: use of closed network connection"}
I1229 06:57:24.544711 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.711045Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:57:24.544730 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717452Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:57:24.544767 17440 command_runner.go:130] ! {"level":"error","ts":"2025-12-29T06:53:58.717551Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.121:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
I1229 06:57:24.544807 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717601Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:57:24.544828 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:53:58.717654Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"functional-695625","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"]}
I1229 06:57:24.552509 17440 logs.go:123] Gathering logs for coredns [6f69ba6a1553] ...
I1229 06:57:24.552540 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 6f69ba6a1553"
I1229 06:57:24.575005 17440 logs.go:123] Gathering logs for Docker ...
I1229 06:57:24.575036 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I1229 06:57:24.597505 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:57:24.597545 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:57:24.597560 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:57:24.597577 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:57:24.597596 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=fatal msg="failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:57:24.597610 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Main process exited, code=exited, status=1/FAILURE
I1229 06:57:24.597628 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Failed with result 'exit-code'.
I1229 06:57:24.597642 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Failed to start CRI Interface for Docker Application Container Engine.
I1229 06:57:24.597654 17440 command_runner.go:130] > Dec 29 06:52:22 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:57:24.597667 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.020462163Z" level=info msg="Starting up"
I1229 06:57:24.597682 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.027928346Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:57:24.597705 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028129610Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:57:24.597733 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028144703Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:57:24.597753 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.043277940Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:57:24.597765 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.068992169Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:57:24.597773 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.093451498Z" level=info msg="Loading containers: start."
I1229 06:57:24.597803 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.245820420Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:57:24.597814 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.354124488Z" level=info msg="Loading containers: done."
I1229 06:57:24.597825 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.369556904Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:57:24.597834 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.370022229Z" level=info msg="Initializing buildkit"
I1229 06:57:24.597841 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: cri-docker.service: Scheduled restart job, restart counter is at 1.
I1229 06:57:24.597848 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:57:24.597856 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.429481151Z" level=info msg="Completed buildkit initialization"
I1229 06:57:24.597866 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437135480Z" level=info msg="Daemon has completed initialization"
I1229 06:57:24.597874 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437228150Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:57:24.597883 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437499736Z" level=info msg="API listen on /run/docker.sock"
I1229 06:57:24.597900 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437545942Z" level=info msg="API listen on [::]:2376"
I1229 06:57:24.597909 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:57:24.597916 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:57:24.597925 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:57:24.597936 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:57:24.597944 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:57:24.597953 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Hairpin mode is set to none"
I1229 06:57:24.597960 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Loaded network plugin cni"
I1229 06:57:24.597973 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:57:24.597981 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:57:24.597991 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:57:24.597999 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:57:24.598010 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:57:24.598017 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:57:24.598029 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250034276Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:57:24.598041 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250065025Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:57:24.598054 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250432086Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:57:24.598067 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250448972Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:57:24.598074 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:57:24.598084 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.004793725Z" level=info msg="Processing signal 'terminated'"
I1229 06:57:24.598095 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006609373Z" level=warning msg="Error while testing if containerd API is ready" error="Canceled: grpc: the client connection is closing"
I1229 06:57:24.598104 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006865498Z" level=info msg="Daemon shutdown complete"
I1229 06:57:24.598111 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:57:24.598117 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:57:24.598126 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:57:24.598132 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.072059214Z" level=info msg="Starting up"
I1229 06:57:24.598141 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079212056Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:57:24.598154 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079317481Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:57:24.598174 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079333267Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:57:24.598186 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.100712562Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:57:24.598196 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.111060819Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:57:24.598205 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.125644752Z" level=info msg="Loading containers: start."
I1229 06:57:24.598224 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.269806698Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:57:24.598235 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.403684326Z" level=info msg="Loading containers: done."
I1229 06:57:24.598246 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419740189Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:57:24.598256 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419840379Z" level=info msg="Initializing buildkit"
I1229 06:57:24.598264 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.440865810Z" level=info msg="Completed buildkit initialization"
I1229 06:57:24.598273 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.450796825Z" level=info msg="Daemon has completed initialization"
I1229 06:57:24.598281 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451233366Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:57:24.598289 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451367379Z" level=info msg="API listen on /run/docker.sock"
I1229 06:57:24.598297 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451393479Z" level=info msg="API listen on [::]:2376"
I1229 06:57:24.598306 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:57:24.598314 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:57:24.598320 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:57:24.598327 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:57:24.598334 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:57:24.598345 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:57:24.598354 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:57:24.598365 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:57:24.598373 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:57:24.598381 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Loaded network plugin cni"
I1229 06:57:24.598389 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:57:24.598400 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:57:24.598415 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:57:24.598431 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:57:24.598447 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:57:24.598463 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:57:24.598476 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:57:24.598492 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.014018901Z" level=info msg="Processing signal 'terminated'"
I1229 06:57:24.598503 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.015980570Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
I1229 06:57:24.598513 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.016658114Z" level=info msg="Daemon shutdown complete"
I1229 06:57:24.598522 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:57:24.598531 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:57:24.598538 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:57:24.598545 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.080172805Z" level=info msg="Starting up"
I1229 06:57:24.598555 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087153730Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:57:24.598578 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087606870Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:57:24.598591 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087791007Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:57:24.598602 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.102104328Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:57:24.598613 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.438808405Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:57:24.598621 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.712758412Z" level=info msg="Loading containers: start."
I1229 06:57:24.598642 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.850108278Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:57:24.598653 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.981771558Z" level=info msg="Loading containers: done."
I1229 06:57:24.598664 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997281457Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:57:24.598674 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997336373Z" level=info msg="Initializing buildkit"
I1229 06:57:24.598683 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.018270012Z" level=info msg="Completed buildkit initialization"
I1229 06:57:24.598693 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.027948102Z" level=info msg="Daemon has completed initialization"
I1229 06:57:24.598701 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028167710Z" level=info msg="API listen on /run/docker.sock"
I1229 06:57:24.598716 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028236879Z" level=info msg="API listen on [::]:2376"
I1229 06:57:24.598724 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028260561Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:57:24.598732 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:57:24.598760 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598774 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598787 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598815 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598832 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:44Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
I1229 06:57:24.598845 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:46Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598860 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598873 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598889 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.598904 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.687270343Z" level=info msg="ignoring event" container=67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.598918 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.834054505Z" level=info msg="ignoring event" container=82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.598933 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154228197Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:57:24.598946 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154272599Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:57:24.598958 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154382560Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:57:24.598973 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154394909Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:57:24.598980 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:57:24.598989 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 dockerd[1647]: time="2025-12-29T06:53:25.157393741Z" level=info msg="Processing signal 'terminated'"
I1229 06:57:24.598999 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.159560262Z" level=error msg="Force shutdown daemon"
I1229 06:57:24.599008 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.160035445Z" level=info msg="Daemon shutdown complete"
I1229 06:57:24.599015 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:57:24.599022 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:57:24.599030 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Consumed 2.138s CPU time, 29.7M memory peak.
I1229 06:57:24.599036 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:57:24.599043 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.286623538Z" level=info msg="Starting up"
I1229 06:57:24.599054 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295291170Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:57:24.599065 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295480841Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:57:24.599077 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295496671Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:57:24.599088 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.316635284Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:57:24.599099 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.328807793Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:57:24.599107 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.355375449Z" level=info msg="Loading containers: start."
I1229 06:57:24.599120 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.681285713Z" level=info msg="ignoring event" container=5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599138 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.755492465Z" level=info msg="ignoring event" container=bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599151 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.762530714Z" level=info msg="ignoring event" container=64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599168 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.775670003Z" level=info msg="ignoring event" container=0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599185 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.794654459Z" level=info msg="ignoring event" container=8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599198 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.801655844Z" level=info msg="ignoring event" container=548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599213 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828715029Z" level=info msg="ignoring event" container=ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599228 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828769092Z" level=info msg="ignoring event" container=a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599241 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.845767487Z" level=info msg="ignoring event" container=abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599257 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.864343925Z" level=info msg="ignoring event" container=fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599270 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.865774071Z" level=info msg="ignoring event" container=14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599285 17440 command_runner.go:130] > Dec 29 06:53:45 functional-695625 dockerd[4014]: time="2025-12-29T06:53:45.656598076Z" level=info msg="ignoring event" container=bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599297 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.636734672Z" level=info msg="ignoring event" container=fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.599319 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.811417108Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:57:24.599331 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.854503584Z" level=info msg="Removing stale sandbox" cid=a123d63a8edb isRestore=false sid=bee98e10184c
I1229 06:57:24.599346 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.857444846Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 155f23c2cd353f99747cbbed5071c374427d34acfe358ab2da9489f0ecc6dd58 20989221f5da3e18159e9875a44d6ffa354887adacc49a282cdee70b58f0dd06], retrying...."
I1229 06:57:24.599359 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.873316567Z" level=info msg="Removing stale sandbox" cid=0af491ef7c2f isRestore=false sid=043bbf7592a3
I1229 06:57:24.599376 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.875334227Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 abd499ef79857402bb9465b07e26fb0f75693045ea6a45283c4a1a4b13da7c92], retrying...."
I1229 06:57:24.599387 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.887452986Z" level=info msg="Removing stale sandbox" cid=ad82b94f7629 isRestore=false sid=4ae81a2c92d8
I1229 06:57:24.599405 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.890633879Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 ccabc3ec6c0d337909f3a6bfccd1999d5ddec500f785c46c7c1173bb9f142a4d], retrying...."
I1229 06:57:24.599423 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.896180450Z" level=info msg="Removing stale sandbox" cid=5024b03252e3 isRestore=false sid=4f7be10df8fc
I1229 06:57:24.599452 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.898438145Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 0e272d35a10e432b889f2a3f6f048225031acf42b0444ba6b0cc9339f3cb374f], retrying...."
I1229 06:57:24.599472 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.903187461Z" level=info msg="Removing stale sandbox" cid=64853b50a6c5 isRestore=false sid=826a3dc204ef
I1229 06:57:24.599489 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.905271147Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 67cd3d4378e987242bd34247eace592097542682b6c3f23a5a478422e9bfbb3b], retrying...."
I1229 06:57:24.599503 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.910152629Z" level=info msg="Removing stale sandbox" cid=548561c7ada8 isRestore=false sid=94281ce70a77
I1229 06:57:24.599517 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.911967707Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 b513626d489ab85e12802c06e57f2ac0b0298434467c73d2846152ca9481eeae], retrying...."
I1229 06:57:24.599529 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.917235829Z" level=info msg="Removing stale sandbox" cid=fe7b5da2f7fb isRestore=false sid=b5e6c523a381
I1229 06:57:24.599544 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.919265802Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 bef3c0f56e910ab0a1a698f2eb08c97229abee2b90bf53ab9119cbdba3cb6eaa], retrying...."
I1229 06:57:24.599559 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022090385Z" level=warning msg="error locating sandbox id 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3: sandbox 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3 not found"
I1229 06:57:24.599572 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022147638Z" level=warning msg="error locating sandbox id 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48: sandbox 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48 not found"
I1229 06:57:24.599587 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022162233Z" level=warning msg="error locating sandbox id 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6: sandbox 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6 not found"
I1229 06:57:24.599602 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022177741Z" level=warning msg="error locating sandbox id 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e: sandbox 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e not found"
I1229 06:57:24.599615 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022193375Z" level=warning msg="error locating sandbox id bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc: sandbox bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc not found"
I1229 06:57:24.599631 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022209936Z" level=warning msg="error locating sandbox id 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2: sandbox 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2 not found"
I1229 06:57:24.599644 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022222477Z" level=warning msg="error locating sandbox id b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0: sandbox b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0 not found"
I1229 06:57:24.599654 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022440032Z" level=info msg="Loading containers: done."
I1229 06:57:24.599664 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037242165Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:57:24.599673 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037335060Z" level=info msg="Initializing buildkit"
I1229 06:57:24.599682 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.057350643Z" level=info msg="Completed buildkit initialization"
I1229 06:57:24.599692 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.066932687Z" level=info msg="Daemon has completed initialization"
I1229 06:57:24.599700 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067089967Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:57:24.599710 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067190842Z" level=info msg="API listen on /run/docker.sock"
I1229 06:57:24.599747 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067284257Z" level=info msg="API listen on [::]:2376"
I1229 06:57:24.599756 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:57:24.599772 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:53:48Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780\""
I1229 06:57:24.599782 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:57:24.599789 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:57:24.599806 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:57:24.599814 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Consumed 1.284s CPU time, 18.5M memory peak.
I1229 06:57:24.599822 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:57:24.599830 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:57:24.599841 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:57:24.599849 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:57:24.599860 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:57:24.599868 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Loaded network plugin cni"
I1229 06:57:24.599879 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:57:24.599886 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:57:24.599896 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:57:24.599907 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:57:24.599914 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:57:24.599922 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:57:24.599934 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.599953 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.599970 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.599983 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600000 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 dockerd[4014]: time="2025-12-29T06:53:50.654005689Z" level=info msg="ignoring event" container=fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600017 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600034 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": unexpected command output nsenter: cannot open /proc/5603/ns/net: No such file or directory\n with error: exit status 1"
I1229 06:57:24.600049 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.781948864Z" level=info msg="ignoring event" container=17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600063 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.786486841Z" level=info msg="ignoring event" container=1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600079 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.807329963Z" level=info msg="ignoring event" container=b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600092 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.833907949Z" level=info msg="ignoring event" container=6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600107 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.842344727Z" level=info msg="ignoring event" container=a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600121 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.846952655Z" level=info msg="ignoring event" container=4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600137 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.855675748Z" level=info msg="ignoring event" container=98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600152 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 dockerd[4014]: time="2025-12-29T06:53:52.089998903Z" level=info msg="ignoring event" container=a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600164 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a96e34d38f8c1eccbbdf73d99dbbbe353acea505d84b69f0fdd4e54cb811123/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600177 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d3819cc8ab802e5145e47325398f1da69b88a241482842040339b6b0d609a176/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600190 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fefef7c5591ea14974a99c19d99f86c4404e25de1b446a0cd0f0bcfffa63a991/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600207 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784\""
I1229 06:57:24.600223 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a7b1e961ded554edec9d882d7f1f6093e8446ab1020c81b638de16b76de139b0/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600235 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.150956960Z" level=error msg="collecting stats for container /k8s_etcd_etcd-functional-695625_kube-system_8da5c6c8980da2ca920a502b6f312384_1: invalid id: id is empty"
I1229 06:57:24.600247 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: 2025/12/29 06:53:58 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request.(*RespWriterWrapper).writeHeader (resp_writer_wrapper.go:83)
I1229 06:57:24.600261 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.741840545Z" level=info msg="ignoring event" container=d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600276 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:59Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/173054afc2f39262ebb1466d26d5d6144bb8704054c087da601130a01d9caaf1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:57:24.600288 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: W1229 06:53:59.025412 4884 logging.go:59] [core] [Server #1] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
I1229 06:57:24.600304 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.600317 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:57:24.600331 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.600345 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:57:24.600357 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 dockerd[4014]: time="2025-12-29T06:56:00.626282205Z" level=info msg="ignoring event" container=78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600373 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 dockerd[4014]: time="2025-12-29T06:56:11.553142622Z" level=info msg="ignoring event" container=18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600386 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 dockerd[4014]: time="2025-12-29T06:56:32.448119389Z" level=info msg="ignoring event" container=0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.600403 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="error getting RW layer size for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b': Error response from daemon: No such container: fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:57:24.600423 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b'"
I1229 06:57:24.600448 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:57:24.600472 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:57:24.600490 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 dockerd[4014]: time="2025-12-29T06:57:22.465508622Z" level=info msg="ignoring event" container=b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:57:24.619075 17440 logs.go:123] Gathering logs for describe nodes ...
I1229 06:57:24.619123 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1229 06:58:24.700496 17440 command_runner.go:130] ! Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
I1229 06:58:24.700542 17440 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (1m0.081407425s)
W1229 06:58:24.700578 17440 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
output:
** stderr **
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
** /stderr **
I1229 06:58:24.700591 17440 logs.go:123] Gathering logs for etcd [6b7711ee25a2] ...
I1229 06:58:24.700607 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 6b7711ee25a2"
I1229 06:58:24.726206 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.924768Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:58:24.726238 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.925193Z","caller":"etcdmain/config.go:270","msg":"--snapshot-count is deprecated in 3.6 and will be decommissioned in 3.7."}
I1229 06:58:24.726283 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925252Z","caller":"etcdmain/etcd.go:64","msg":"Running: ","args":["etcd","--advertise-client-urls=https://192.168.39.121:2379","--cert-file=/var/lib/minikube/certs/etcd/server.crt","--client-cert-auth=true","--data-dir=/var/lib/minikube/etcd","--feature-gates=InitialCorruptCheck=true","--initial-advertise-peer-urls=https://192.168.39.121:2380","--initial-cluster=functional-695625=https://192.168.39.121:2380","--key-file=/var/lib/minikube/certs/etcd/server.key","--listen-client-urls=https://127.0.0.1:2379,https://192.168.39.121:2379","--listen-metrics-urls=http://127.0.0.1:2381","--listen-peer-urls=https://192.168.39.121:2380","--name=functional-695625","--peer-cert-file=/var/lib/minikube/certs/etcd/peer.crt","--peer-client-cert-auth=true","--peer-key-file=/var/lib/minikube/certs/etcd/peer.key","--peer-trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--snapshot-count=10000","--trusted-ca-file=/var/lib/minikube/certs/etcd/ca.crt","--watch-progress-notify-interval=5s"]}
I1229 06:58:24.726296 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925487Z","caller":"etcdmain/etcd.go:107","msg":"server has already been initialized","data-dir":"/var/lib/minikube/etcd","dir-type":"member"}
I1229 06:58:24.726311 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.925602Z","caller":"embed/config.go:1209","msg":"Running http and grpc server on single port. This is not recommended for production."}
I1229 06:58:24.726321 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925710Z","caller":"embed/etcd.go:138","msg":"configuring peer listeners","listen-peer-urls":["https://192.168.39.121:2380"]}
I1229 06:58:24.726342 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.925810Z","caller":"embed/etcd.go:544","msg":"starting with peer TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/peer.crt, key = /var/lib/minikube/certs/etcd/peer.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:58:24.726358 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.934471Z","caller":"embed/etcd.go:146","msg":"configuring client listeners","listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"]}
I1229 06:58:24.726438 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.935217Z","caller":"embed/etcd.go:323","msg":"starting an etcd server","etcd-version":"3.6.6","git-sha":"d2809cf","go-version":"go1.24.10","go-os":"linux","go-arch":"amd64","max-cpu-set":2,"max-cpu-available":2,"member-initialized":true,"name":"functional-695625","data-dir":"/var/lib/minikube/etcd","wal-dir":"","wal-dir-dedicated":"","member-dir":"/var/lib/minikube/etcd/member","force-new-cluster":false,"heartbeat-interval":"100ms","election-timeout":"1s","initial-election-tick-advance":true,"snapshot-count":10000,"max-wals":5,"max-snapshots":5,"snapshot-catchup-entries":5000,"initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"],"experimental-local-address":"","cors":["*"],"host-whitelist":["*"],"initial-cluster":"","initial-cluster-state":"new","initial-cluster-token":"","quota-backend-bytes":2147483648,"max-request-bytes":1572864,"max-concurrent-streams":4294967295,"pre-vote":true,"feature-gates":"InitialCorruptCheck=true","initial-corrupt-check":false,"corrupt-check-time-interval":"0s","compact-check-time-interval":"1m0s","auto-compaction-mode":"periodic","auto-compaction-retention":"0s","auto-compaction-interval":"0s","discovery-url":"","discovery-proxy":"","discovery-token":"","discovery-endpoints":"","discovery-dial-timeout":"2s","discovery-request-timeout":"5s","discovery-keepalive-time":"2s","discovery-keepalive-timeout":"6s","discovery-insecure-transport":true,"discovery-insecure-skip-tls-verify":false,"discovery-cert":"","discovery-key":"","discovery-cacert":"","discovery-user":"","downgrade-check-interval":"5s","max-learners":1,"v2-deprecation":"write-only"}
I1229 06:58:24.726461 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.937503Z","logger":"bbolt","caller":"backend/backend.go:203","msg":"Opening db file (/var/lib/minikube/etcd/member/snap/db) with mode -rw------- and with options: {Timeout: 0s, NoGrowSync: false, NoFreelistSync: true, PreLoadFreelist: false, FreelistType: hashmap, ReadOnly: false, MmapFlags: 8000, InitialMmapSize: 10737418240, PageSize: 0, NoSync: false, OpenFile: 0x0, Mlock: false, Logger: 0xc000068080}"}
I1229 06:58:24.726472 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.940423Z","logger":"bbolt","caller":"bbolt@v1.4.3/db.go:321","msg":"Opening bbolt db (/var/lib/minikube/etcd/member/snap/db) successfully"}
I1229 06:58:24.726483 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.940850Z","caller":"storage/backend.go:80","msg":"opened backend db","path":"/var/lib/minikube/etcd/member/snap/db","took":"3.479356ms"}
I1229 06:58:24.726492 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.941120Z","caller":"etcdserver/bootstrap.go:220","msg":"restore consistentIndex","index":499}
I1229 06:58:24.726503 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945006Z","caller":"etcdserver/bootstrap.go:441","msg":"No snapshot found. Recovering WAL from scratch!"}
I1229 06:58:24.726517 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945707Z","caller":"etcdserver/bootstrap.go:232","msg":"recovered v3 backend","backend-size-bytes":1019904,"backend-size":"1.0 MB","backend-size-in-use-bytes":999424,"backend-size-in-use":"999 kB"}
I1229 06:58:24.726528 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.945966Z","caller":"etcdserver/bootstrap.go:90","msg":"Bootstrapping WAL from snapshot"}
I1229 06:58:24.726540 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.951906Z","caller":"etcdserver/bootstrap.go:599","msg":"restarting local member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","commit-index":499}
I1229 06:58:24.726552 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952063Z","caller":"etcdserver/bootstrap.go:94","msg":"bootstrapping cluster"}
I1229 06:58:24.726560 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952160Z","caller":"etcdserver/bootstrap.go:101","msg":"bootstrapping storage"}
I1229 06:58:24.726577 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952338Z","caller":"membership/cluster.go:605","msg":"Detected member only in v3store but missing in v2store","member":"{ID:cbdf275f553df7c2 RaftAttributes:{PeerURLs:[https://192.168.39.121:2380] IsLearner:false} Attributes:{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}}"}
I1229 06:58:24.726590 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952385Z","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.6"}
I1229 06:58:24.726607 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952396Z","caller":"membership/cluster.go:297","msg":"recovered/added member from store","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","recovered-remote-peer-id":"cbdf275f553df7c2","recovered-remote-peer-urls":["https://192.168.39.121:2380"],"recovered-remote-peer-is-learner":false}
I1229 06:58:24.726618 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952406Z","caller":"membership/cluster.go:307","msg":"set cluster version from store","cluster-version":"3.6"}
I1229 06:58:24.726629 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952416Z","caller":"etcdserver/bootstrap.go:109","msg":"bootstrapping raft"}
I1229 06:58:24.726636 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952460Z","caller":"etcdserver/server.go:312","msg":"bootstrap successfully"}
I1229 06:58:24.726647 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952535Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=()"}
I1229 06:58:24.726657 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952604Z","logger":"raft","caller":"v3@v3.6.0/raft.go:897","msg":"cbdf275f553df7c2 became follower at term 3"}
I1229 06:58:24.726670 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.952619Z","logger":"raft","caller":"v3@v3.6.0/raft.go:493","msg":"newRaft cbdf275f553df7c2 [peers: [], term: 3, commit: 499, applied: 0, lastindex: 499, lastterm: 3]"}
I1229 06:58:24.726680 17440 command_runner.go:130] ! {"level":"warn","ts":"2025-12-29T06:56:00.955095Z","caller":"auth/store.go:1135","msg":"simple token is not cryptographically signed"}
I1229 06:58:24.726698 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.961356Z","caller":"mvcc/kvstore.go:408","msg":"kvstore restored","current-rev":465}
I1229 06:58:24.726711 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.967658Z","caller":"storage/quota.go:93","msg":"enabled backend quota with default value","quota-name":"v3-applier","quota-size-bytes":2147483648,"quota-size":"2.1 GB"}
I1229 06:58:24.726723 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.968487Z","caller":"etcdserver/corrupt.go:91","msg":"starting initial corruption check","local-member-id":"cbdf275f553df7c2","timeout":"7s"}
I1229 06:58:24.726735 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969020Z","caller":"etcdserver/corrupt.go:172","msg":"initial corruption checking passed; no corruption","local-member-id":"cbdf275f553df7c2"}
I1229 06:58:24.726750 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969260Z","caller":"etcdserver/server.go:589","msg":"starting etcd server","local-member-id":"cbdf275f553df7c2","local-server-version":"3.6.6","cluster-id":"6f38b6947d3f1f22","cluster-version":"3.6"}
I1229 06:58:24.726765 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.969708Z","caller":"etcdserver/server.go:483","msg":"started as single-node; fast-forwarding election ticks","local-member-id":"cbdf275f553df7c2","forward-ticks":9,"forward-duration":"900ms","election-ticks":10,"election-timeout":"1s"}
I1229 06:58:24.726784 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970043Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
I1229 06:58:24.726826 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970828Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cbdf275f553df7c2","initial-advertise-peer-urls":["https://192.168.39.121:2380"],"listen-peer-urls":["https://192.168.39.121:2380"],"advertise-client-urls":["https://192.168.39.121:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.121:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
I1229 06:58:24.726839 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971046Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
I1229 06:58:24.726848 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970057Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.121:2380"}
I1229 06:58:24.726858 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971258Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.121:2380"}
I1229 06:58:24.726870 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970152Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
I1229 06:58:24.726883 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971336Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
I1229 06:58:24.726896 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.971370Z","caller":"fileutil/purge.go:49","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
I1229 06:58:24.726906 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.970393Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"cbdf275f553df7c2 switched to configuration voters=(14690503799911348162)"}
I1229 06:58:24.726922 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.972410Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","added-peer-id":"cbdf275f553df7c2","added-peer-peer-urls":["https://192.168.39.121:2380"],"added-peer-is-learner":false}
I1229 06:58:24.726935 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:00.972698Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"6f38b6947d3f1f22","local-member-id":"cbdf275f553df7c2","from":"3.6","to":"3.6"}
I1229 06:58:24.726947 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353814Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cbdf275f553df7c2 is starting a new election at term 3"}
I1229 06:58:24.726956 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353865Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cbdf275f553df7c2 became pre-candidate at term 3"}
I1229 06:58:24.726969 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.353992Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgPreVoteResp from cbdf275f553df7c2 at term 3"}
I1229 06:58:24.726982 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.354007Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
I1229 06:58:24.726997 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.354031Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cbdf275f553df7c2 became candidate at term 4"}
I1229 06:58:24.727009 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355853Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cbdf275f553df7c2 received MsgVoteResp from cbdf275f553df7c2 at term 4"}
I1229 06:58:24.727020 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355920Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cbdf275f553df7c2 has received 1 MsgVoteResp votes and 0 vote rejections"}
I1229 06:58:24.727029 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.355940Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cbdf275f553df7c2 became leader at term 4"}
I1229 06:58:24.727039 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.356018Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cbdf275f553df7c2 elected leader cbdf275f553df7c2 at term 4"}
I1229 06:58:24.727056 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358237Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cbdf275f553df7c2","local-member-attributes":"{Name:functional-695625 ClientURLs:[https://192.168.39.121:2379]}","cluster-id":"6f38b6947d3f1f22","publish-timeout":"7s"}
I1229 06:58:24.727064 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358323Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:58:24.727072 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358268Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
I1229 06:58:24.727081 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358820Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
I1229 06:58:24.727089 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.358859Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
I1229 06:58:24.727100 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.360417Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:58:24.727109 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.360952Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
I1229 06:58:24.727120 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.363710Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
I1229 06:58:24.727132 17440 command_runner.go:130] ! {"level":"info","ts":"2025-12-29T06:56:01.363760Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.121:2379"}
I1229 06:58:24.733042 17440 logs.go:123] Gathering logs for kube-scheduler [a79d99ad3fde] ...
I1229 06:58:24.733064 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 a79d99ad3fde"
I1229 06:58:24.755028 17440 command_runner.go:130] ! I1229 06:53:51.269699 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:58:24.755231 17440 logs.go:123] Gathering logs for kube-proxy [8911777281f4] ...
I1229 06:58:24.755256 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 8911777281f4"
I1229 06:58:24.776073 17440 command_runner.go:130] ! I1229 06:52:47.703648 1 server_linux.go:53] "Using iptables proxy"
I1229 06:58:24.776109 17440 command_runner.go:130] ! I1229 06:52:47.791676 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:58:24.776120 17440 command_runner.go:130] ! I1229 06:52:47.897173 1 shared_informer.go:377] "Caches are synced"
I1229 06:58:24.776135 17440 command_runner.go:130] ! I1229 06:52:47.900073 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.39.121"]
I1229 06:58:24.776154 17440 command_runner.go:130] ! E1229 06:52:47.906310 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1229 06:58:24.776162 17440 command_runner.go:130] ! I1229 06:52:48.206121 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
I1229 06:58:24.776180 17440 command_runner.go:130] ! error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
I1229 06:58:24.776188 17440 command_runner.go:130] ! Perhaps ip6tables or your kernel needs to be upgraded.
I1229 06:58:24.776195 17440 command_runner.go:130] ! >
I1229 06:58:24.776212 17440 command_runner.go:130] ! I1229 06:52:48.209509 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1229 06:58:24.776224 17440 command_runner.go:130] ! I1229 06:52:48.210145 1 server_linux.go:136] "Using iptables Proxier"
I1229 06:58:24.776249 17440 command_runner.go:130] ! I1229 06:52:48.253805 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1229 06:58:24.776257 17440 command_runner.go:130] ! I1229 06:52:48.255046 1 server.go:529] "Version info" version="v1.35.0"
I1229 06:58:24.776266 17440 command_runner.go:130] ! I1229 06:52:48.255076 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:58:24.776282 17440 command_runner.go:130] ! I1229 06:52:48.262205 1 config.go:200] "Starting service config controller"
I1229 06:58:24.776296 17440 command_runner.go:130] ! I1229 06:52:48.262238 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1229 06:58:24.776307 17440 command_runner.go:130] ! I1229 06:52:48.262258 1 config.go:106] "Starting endpoint slice config controller"
I1229 06:58:24.776328 17440 command_runner.go:130] ! I1229 06:52:48.262261 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1229 06:58:24.776350 17440 command_runner.go:130] ! I1229 06:52:48.262278 1 config.go:403] "Starting serviceCIDR config controller"
I1229 06:58:24.776366 17440 command_runner.go:130] ! I1229 06:52:48.262282 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1229 06:58:24.776376 17440 command_runner.go:130] ! I1229 06:52:48.270608 1 config.go:309] "Starting node config controller"
I1229 06:58:24.776388 17440 command_runner.go:130] ! I1229 06:52:48.271311 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1229 06:58:24.776404 17440 command_runner.go:130] ! I1229 06:52:48.271337 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1229 06:58:24.776420 17440 command_runner.go:130] ! I1229 06:52:48.363324 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1229 06:58:24.776439 17440 command_runner.go:130] ! I1229 06:52:48.363427 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1229 06:58:24.776453 17440 command_runner.go:130] ! I1229 06:52:48.363671 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1229 06:58:24.778558 17440 logs.go:123] Gathering logs for dmesg ...
I1229 06:58:24.778595 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1229 06:58:24.793983 17440 command_runner.go:130] > [Dec29 06:52] Booted with the nomodeset parameter. Only the system framebuffer will be available
I1229 06:58:24.794025 17440 command_runner.go:130] > [ +0.000007] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
I1229 06:58:24.794040 17440 command_runner.go:130] > [ +0.000088] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
I1229 06:58:24.794054 17440 command_runner.go:130] > [ +0.005634] (rpcbind)[119]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
I1229 06:58:24.794069 17440 command_runner.go:130] > [ +0.202886] crun[405]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set
I1229 06:58:24.794079 17440 command_runner.go:130] > [ +0.971059] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
I1229 06:58:24.794096 17440 command_runner.go:130] > [ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
I1229 06:58:24.794106 17440 command_runner.go:130] > [ +0.000001] NFSD: Unable to initialize client recovery tracking! (-2)
I1229 06:58:24.794117 17440 command_runner.go:130] > [ +0.268875] kauditd_printk_skb: 32 callbacks suppressed
I1229 06:58:24.794125 17440 command_runner.go:130] > [ +0.123569] kauditd_printk_skb: 342 callbacks suppressed
I1229 06:58:24.794136 17440 command_runner.go:130] > [ +0.099711] kauditd_printk_skb: 205 callbacks suppressed
I1229 06:58:24.794146 17440 command_runner.go:130] > [ +0.170782] kauditd_printk_skb: 221 callbacks suppressed
I1229 06:58:24.794160 17440 command_runner.go:130] > [ +0.199839] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:58:24.794167 17440 command_runner.go:130] > [ +0.025660] kauditd_printk_skb: 318 callbacks suppressed
I1229 06:58:24.794178 17440 command_runner.go:130] > [Dec29 06:53] kauditd_printk_skb: 19 callbacks suppressed
I1229 06:58:24.794186 17440 command_runner.go:130] > [ +15.204939] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:58:24.794196 17440 command_runner.go:130] > [ +0.333829] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:58:24.794207 17440 command_runner.go:130] > [ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:58:24.794215 17440 command_runner.go:130] > [ +4.976278] kauditd_printk_skb: 42 callbacks suppressed
I1229 06:58:24.794221 17440 command_runner.go:130] > [ +0.830497] kauditd_printk_skb: 396 callbacks suppressed
I1229 06:58:24.794229 17440 command_runner.go:130] > [ +5.294312] kauditd_printk_skb: 231 callbacks suppressed
I1229 06:58:24.794241 17440 command_runner.go:130] > [Dec29 06:56] kauditd_printk_skb: 36 callbacks suppressed
I1229 06:58:24.794252 17440 command_runner.go:130] > [ +10.952068] kauditd_printk_skb: 66 callbacks suppressed
I1229 06:58:24.794260 17440 command_runner.go:130] > [ +20.880271] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:24.794271 17440 command_runner.go:130] > [Dec29 06:57] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:24.795355 17440 logs.go:123] Gathering logs for kube-scheduler [4d49952084c9] ...
I1229 06:58:24.795387 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 4d49952084c9"
I1229 06:58:24.820602 17440 command_runner.go:130] ! I1229 06:53:52.882050 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:58:24.820635 17440 command_runner.go:130] ! W1229 06:54:52.896472 1 authentication.go:397] Error looking up in-cluster authentication configuration: the server was unable to return a response in the time allotted, but may still be processing the request (get configmaps extension-apiserver-authentication)
I1229 06:58:24.820646 17440 command_runner.go:130] ! W1229 06:54:52.896499 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
I1229 06:58:24.820657 17440 command_runner.go:130] ! W1229 06:54:52.896506 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1229 06:58:24.820665 17440 command_runner.go:130] ! I1229 06:54:52.913597 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.35.0"
I1229 06:58:24.820672 17440 command_runner.go:130] ! I1229 06:54:52.913622 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:58:24.820681 17440 command_runner.go:130] ! I1229 06:54:52.915784 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1229 06:58:24.820692 17440 command_runner.go:130] ! I1229 06:54:52.915816 1 shared_informer.go:370] "Waiting for caches to sync"
I1229 06:58:24.820698 17440 command_runner.go:130] ! I1229 06:54:52.915823 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1229 06:58:24.820705 17440 command_runner.go:130] ! I1229 06:54:52.915940 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:58:24.822450 17440 logs.go:123] Gathering logs for kube-controller-manager [17fe16a2822a] ...
I1229 06:58:24.822473 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 17fe16a2822a"
I1229 06:58:24.844122 17440 command_runner.go:130] ! I1229 06:53:51.283329 1 serving.go:386] Generated self-signed cert in-memory
I1229 06:58:24.844156 17440 command_runner.go:130] ! I1229 06:53:51.303666 1 controllermanager.go:189] "Starting" version="v1.35.0"
I1229 06:58:24.844170 17440 command_runner.go:130] ! I1229 06:53:51.303706 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:58:24.844184 17440 command_runner.go:130] ! I1229 06:53:51.307865 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1229 06:58:24.844201 17440 command_runner.go:130] ! I1229 06:53:51.308287 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:58:24.844210 17440 command_runner.go:130] ! I1229 06:53:51.309479 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1229 06:58:24.844218 17440 command_runner.go:130] ! I1229 06:53:51.309545 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1229 06:58:24.845429 17440 logs.go:123] Gathering logs for storage-provisioner [bd96b57aa9fc] ...
I1229 06:58:24.845453 17440 ssh_runner.go:195] Run: /bin/bash -c "docker logs --tail 400 bd96b57aa9fc"
I1229 06:58:24.867566 17440 command_runner.go:130] ! I1229 06:52:48.539098 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1229 06:58:24.867597 17440 command_runner.go:130] ! I1229 06:52:48.550309 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1229 06:58:24.867607 17440 command_runner.go:130] ! I1229 06:52:48.550373 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1229 06:58:24.867615 17440 command_runner.go:130] ! W1229 06:52:48.552935 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867622 17440 command_runner.go:130] ! W1229 06:52:48.563735 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867633 17440 command_runner.go:130] ! I1229 06:52:48.564362 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1229 06:58:24.867653 17440 command_runner.go:130] ! I1229 06:52:48.565422 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:58:24.867681 17440 command_runner.go:130] ! I1229 06:52:48.565143 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"cfb65e16-c2f7-4c19-a059-8ef64f8f3f2e", APIVersion:"v1", ResourceVersion:"388", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868 became leader
I1229 06:58:24.867694 17440 command_runner.go:130] ! W1229 06:52:48.576668 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867704 17440 command_runner.go:130] ! W1229 06:52:48.582743 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867719 17440 command_runner.go:130] ! I1229 06:52:48.665711 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_functional-695625_c1740534-d530-4bf5-8b9a-b5bede576868!
I1229 06:58:24.867734 17440 command_runner.go:130] ! W1229 06:52:50.588204 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867750 17440 command_runner.go:130] ! W1229 06:52:50.593815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867763 17440 command_runner.go:130] ! W1229 06:52:52.597431 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867817 17440 command_runner.go:130] ! W1229 06:52:52.602815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867836 17440 command_runner.go:130] ! W1229 06:52:54.606663 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867848 17440 command_runner.go:130] ! W1229 06:52:54.612650 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867859 17440 command_runner.go:130] ! W1229 06:52:56.616395 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867871 17440 command_runner.go:130] ! W1229 06:52:56.622404 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867883 17440 command_runner.go:130] ! W1229 06:52:58.626804 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867891 17440 command_runner.go:130] ! W1229 06:52:58.637257 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867901 17440 command_runner.go:130] ! W1229 06:53:00.640728 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867914 17440 command_runner.go:130] ! W1229 06:53:00.646446 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867926 17440 command_runner.go:130] ! W1229 06:53:02.650659 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867944 17440 command_runner.go:130] ! W1229 06:53:02.660374 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867956 17440 command_runner.go:130] ! W1229 06:53:04.664091 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867972 17440 command_runner.go:130] ! W1229 06:53:04.669806 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867982 17440 command_runner.go:130] ! W1229 06:53:06.674203 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.867997 17440 command_runner.go:130] ! W1229 06:53:06.680002 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868013 17440 command_runner.go:130] ! W1229 06:53:08.683483 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868028 17440 command_runner.go:130] ! W1229 06:53:08.688934 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868048 17440 command_runner.go:130] ! W1229 06:53:10.693644 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868063 17440 command_runner.go:130] ! W1229 06:53:10.706122 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868071 17440 command_runner.go:130] ! W1229 06:53:12.709949 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868081 17440 command_runner.go:130] ! W1229 06:53:12.715753 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868098 17440 command_runner.go:130] ! W1229 06:53:14.719191 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868111 17440 command_runner.go:130] ! W1229 06:53:14.728100 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868127 17440 command_runner.go:130] ! W1229 06:53:16.731658 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868140 17440 command_runner.go:130] ! W1229 06:53:16.737463 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868153 17440 command_runner.go:130] ! W1229 06:53:18.741304 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868164 17440 command_runner.go:130] ! W1229 06:53:18.746708 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868177 17440 command_runner.go:130] ! W1229 06:53:20.749662 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868192 17440 command_runner.go:130] ! W1229 06:53:20.755989 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868207 17440 command_runner.go:130] ! W1229 06:53:22.760815 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868221 17440 command_runner.go:130] ! W1229 06:53:22.772421 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868236 17440 command_runner.go:130] ! W1229 06:53:24.776403 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868247 17440 command_runner.go:130] ! W1229 06:53:24.783232 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868258 17440 command_runner.go:130] ! W1229 06:53:26.786665 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868275 17440 command_runner.go:130] ! W1229 06:53:26.792239 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868290 17440 command_runner.go:130] ! W1229 06:53:28.796420 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868304 17440 command_runner.go:130] ! W1229 06:53:28.805511 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868320 17440 command_runner.go:130] ! W1229 06:53:30.808544 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868332 17440 command_runner.go:130] ! W1229 06:53:30.816066 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868342 17440 command_runner.go:130] ! W1229 06:53:32.820090 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868358 17440 command_runner.go:130] ! W1229 06:53:32.826208 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868373 17440 command_runner.go:130] ! W1229 06:53:34.829865 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868385 17440 command_runner.go:130] ! W1229 06:53:34.835774 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868400 17440 command_runner.go:130] ! W1229 06:53:36.839291 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868414 17440 command_runner.go:130] ! W1229 06:53:36.853251 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868425 17440 command_runner.go:130] ! W1229 06:53:38.856432 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.868438 17440 command_runner.go:130] ! W1229 06:53:38.862360 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
I1229 06:58:24.872821 17440 out.go:374] Setting ErrFile to fd 2...
I1229 06:58:24.872842 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
W1229 06:58:24.872901 17440 out.go:285] X Problems detected in kube-apiserver [b206d555ad19]:
W1229 06:58:24.872915 17440 out.go:285] E1229 06:57:22.441956 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 0.0.0.0:8441: listen tcp 0.0.0.0:8441: bind: address already in use"
I1229 06:58:24.872919 17440 out.go:374] Setting ErrFile to fd 2...
I1229 06:58:24.872923 17440 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1229 06:58:34.875381 17440 api_server.go:299] Checking apiserver healthz at https://192.168.39.121:8441/healthz ...
I1229 06:58:39.877679 17440 api_server.go:315] stopped: https://192.168.39.121:8441/healthz: Get "https://192.168.39.121:8441/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
I1229 06:58:39.877779 17440 kubeadm.go:602] duration metric: took 4m48.388076341s to restartPrimaryControlPlane
W1229 06:58:39.877879 17440 out.go:285] ! Unable to restart control-plane node(s), will reset cluster: <no value>
I1229 06:58:39.877946 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm reset --cri-socket /var/run/cri-dockerd.sock --force"
I1229 06:58:39.930050 17440 command_runner.go:130] ! W1229 06:58:39.921577 8187 resetconfiguration.go:53] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-dockerd.sock". Please update your configuration!
I1229 06:58:49.935089 17440 command_runner.go:130] ! W1229 06:58:49.926653 8187 reset.go:141] [reset] Unable to fetch the kubeadm-config ConfigMap from cluster: failed to get config map: Get "https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/configmaps/kubeadm-config?timeout=10s": net/http: request canceled (Client.Timeout exceeded while awaiting headers)
I1229 06:58:49.935131 17440 command_runner.go:130] ! W1229 06:58:49.926754 8187 removeetcdmember.go:105] [reset] No kubeadm config, using etcd pod spec to get data directory
I1229 06:58:50.998307 17440 command_runner.go:130] > [reset] Reading configuration from the "kubeadm-config" ConfigMap in namespace "kube-system"...
I1229 06:58:50.998341 17440 command_runner.go:130] > [reset] Use 'kubeadm init phase upload-config kubeadm --config your-config-file' to re-upload it.
I1229 06:58:50.998348 17440 command_runner.go:130] > [preflight] Running pre-flight checks
I1229 06:58:50.998357 17440 command_runner.go:130] > [reset] Deleted contents of the etcd data directory: /var/lib/minikube/etcd
I1229 06:58:50.998366 17440 command_runner.go:130] > [reset] Stopping the kubelet service
I1229 06:58:50.998372 17440 command_runner.go:130] > [reset] Unmounting mounted directories in "/var/lib/kubelet"
I1229 06:58:50.998386 17440 command_runner.go:130] > [reset] Deleting contents of directories: [/etc/kubernetes/manifests /var/lib/kubelet /etc/kubernetes/pki]
I1229 06:58:50.998407 17440 command_runner.go:130] > [reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
I1229 06:58:50.998417 17440 command_runner.go:130] > The reset process does not perform cleanup of CNI plugin configuration,
I1229 06:58:50.998428 17440 command_runner.go:130] > network filtering rules and kubeconfig files.
I1229 06:58:50.998434 17440 command_runner.go:130] > For information on how to perform this cleanup manually, please see:
I1229 06:58:50.998442 17440 command_runner.go:130] > https://k8s.io/docs/reference/setup-tools/kubeadm/kubeadm-reset/
I1229 06:58:50.998458 17440 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm reset --cri-socket /var/run/cri-dockerd.sock --force": (11.120499642s)
I1229 06:58:50.998527 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1229 06:58:51.015635 17440 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1229 06:58:51.028198 17440 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1229 06:58:51.040741 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
I1229 06:58:51.040780 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
I1229 06:58:51.040811 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
I1229 06:58:51.040826 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1229 06:58:51.040865 17440 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1229 06:58:51.040877 17440 kubeadm.go:158] found existing configuration files:
I1229 06:58:51.040925 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I1229 06:58:51.051673 17440 command_runner.go:130] ! grep: /etc/kubernetes/admin.conf: No such file or directory
I1229 06:58:51.052090 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1229 06:58:51.052155 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1229 06:58:51.064755 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I1229 06:58:51.076455 17440 command_runner.go:130] ! grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1229 06:58:51.076517 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1229 06:58:51.076577 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1229 06:58:51.088881 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I1229 06:58:51.099253 17440 command_runner.go:130] ! grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1229 06:58:51.099652 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1229 06:58:51.099710 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1229 06:58:51.111487 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I1229 06:58:51.122532 17440 command_runner.go:130] ! grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1229 06:58:51.122905 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1229 06:58:51.122972 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1229 06:58:51.135143 17440 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem"
I1229 06:58:51.355420 17440 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1229 06:58:51.355450 17440 command_runner.go:130] ! [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1229 06:58:51.355543 17440 kubeadm.go:319] [preflight] Some fatal errors occurred:
I1229 06:58:51.355556 17440 command_runner.go:130] ! [preflight] Some fatal errors occurred:
I1229 06:58:51.355615 17440 kubeadm.go:319] [ERROR Port-8441]: Port 8441 is in use
I1229 06:58:51.355625 17440 command_runner.go:130] ! [ERROR Port-8441]: Port 8441 is in use
I1229 06:58:51.355790 17440 kubeadm.go:319] [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
I1229 06:58:51.355837 17440 command_runner.go:130] ! [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
I1229 06:58:51.356251 17440 kubeadm.go:319] error: error execution phase preflight: preflight checks failed
I1229 06:58:51.356265 17440 command_runner.go:130] ! error: error execution phase preflight: preflight checks failed
I1229 06:58:51.356317 17440 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
I1229 06:58:51.356324 17440 command_runner.go:130] ! To see the stack trace of this error execute with --v=5 or higher
I1229 06:58:51.357454 17440 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0
I1229 06:58:51.357471 17440 command_runner.go:130] > [init] Using Kubernetes version: v1.35.0
I1229 06:58:51.357544 17440 kubeadm.go:319] [preflight] Running pre-flight checks
I1229 06:58:51.357561 17440 command_runner.go:130] > [preflight] Running pre-flight checks
W1229 06:58:51.357680 17440 out.go:285] ! initialization failed, will try again: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
stderr:
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Some fatal errors occurred:
[ERROR Port-8441]: Port 8441 is in use
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
error: error execution phase preflight: preflight checks failed
To see the stack trace of this error execute with --v=5 or higher
I1229 06:58:51.357753 17440 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm reset --cri-socket /var/run/cri-dockerd.sock --force"
I1229 06:58:51.401004 17440 command_runner.go:130] > [preflight] Running pre-flight checks
I1229 06:58:51.401036 17440 command_runner.go:130] > [reset] Deleted contents of the etcd data directory: /var/lib/etcd
I1229 06:58:51.401047 17440 command_runner.go:130] > [reset] Stopping the kubelet service
I1229 06:58:51.408535 17440 command_runner.go:130] > [reset] Unmounting mounted directories in "/var/lib/kubelet"
I1229 06:58:51.413813 17440 command_runner.go:130] > [reset] Deleting contents of directories: [/etc/kubernetes/manifests /var/lib/kubelet /etc/kubernetes/pki]
I1229 06:58:51.415092 17440 command_runner.go:130] > [reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
I1229 06:58:51.415117 17440 command_runner.go:130] > The reset process does not perform cleanup of CNI plugin configuration,
I1229 06:58:51.415128 17440 command_runner.go:130] > network filtering rules and kubeconfig files.
I1229 06:58:51.415137 17440 command_runner.go:130] > For information on how to perform this cleanup manually, please see:
I1229 06:58:51.415145 17440 command_runner.go:130] > https://k8s.io/docs/reference/setup-tools/kubeadm/kubeadm-reset/
I1229 06:58:51.415645 17440 command_runner.go:130] ! W1229 06:58:51.391426 8625 resetconfiguration.go:53] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-dockerd.sock". Please update your configuration!
I1229 06:58:51.415670 17440 command_runner.go:130] ! W1229 06:58:51.392518 8625 removeetcdmember.go:105] [reset] No kubeadm config, using etcd pod spec to get data directory
I1229 06:58:51.415739 17440 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1229 06:58:51.432316 17440 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1229 06:58:51.444836 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
I1229 06:58:51.444860 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
I1229 06:58:51.444867 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
I1229 06:58:51.444874 17440 command_runner.go:130] ! ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1229 06:58:51.445417 17440 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1229 06:58:51.445435 17440 kubeadm.go:158] found existing configuration files:
I1229 06:58:51.445485 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I1229 06:58:51.457038 17440 command_runner.go:130] ! grep: /etc/kubernetes/admin.conf: No such file or directory
I1229 06:58:51.457099 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1229 06:58:51.457146 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1229 06:58:51.469980 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I1229 06:58:51.480965 17440 command_runner.go:130] ! grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1229 06:58:51.481435 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1229 06:58:51.481498 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1229 06:58:51.493408 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I1229 06:58:51.504342 17440 command_runner.go:130] ! grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1229 06:58:51.504404 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1229 06:58:51.504468 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1229 06:58:51.516567 17440 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I1229 06:58:51.526975 17440 command_runner.go:130] ! grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1229 06:58:51.527475 17440 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1229 06:58:51.527532 17440 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1229 06:58:51.539365 17440 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem"
I1229 06:58:51.587038 17440 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0
I1229 06:58:51.587068 17440 command_runner.go:130] > [init] Using Kubernetes version: v1.35.0
I1229 06:58:51.587108 17440 kubeadm.go:319] [preflight] Running pre-flight checks
I1229 06:58:51.587113 17440 command_runner.go:130] > [preflight] Running pre-flight checks
I1229 06:58:51.738880 17440 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1229 06:58:51.738912 17440 command_runner.go:130] ! [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1229 06:58:51.738963 17440 kubeadm.go:319] [preflight] Some fatal errors occurred:
I1229 06:58:51.738975 17440 command_runner.go:130] ! [preflight] Some fatal errors occurred:
I1229 06:58:51.739029 17440 kubeadm.go:319] [ERROR Port-8441]: Port 8441 is in use
I1229 06:58:51.739038 17440 command_runner.go:130] ! [ERROR Port-8441]: Port 8441 is in use
I1229 06:58:51.739157 17440 kubeadm.go:319] [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
I1229 06:58:51.739166 17440 command_runner.go:130] ! [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
I1229 06:58:51.739271 17440 kubeadm.go:319] error: error execution phase preflight: preflight checks failed
I1229 06:58:51.739294 17440 command_runner.go:130] ! error: error execution phase preflight: preflight checks failed
I1229 06:58:51.739348 17440 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
I1229 06:58:51.739355 17440 command_runner.go:130] ! To see the stack trace of this error execute with --v=5 or higher
I1229 06:58:51.739406 17440 kubeadm.go:403] duration metric: took 5m0.289116828s to StartCluster
I1229 06:58:51.739455 17440 cri.go:61] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I1229 06:58:51.739507 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-apiserver
I1229 06:58:51.776396 17440 cri.go:96] found id: ""
I1229 06:58:51.776420 17440 logs.go:282] 0 containers: []
W1229 06:58:51.776428 17440 logs.go:284] No container was found matching "kube-apiserver"
I1229 06:58:51.776434 17440 cri.go:61] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I1229 06:58:51.776522 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=etcd
I1229 06:58:51.808533 17440 cri.go:96] found id: ""
I1229 06:58:51.808556 17440 logs.go:282] 0 containers: []
W1229 06:58:51.808563 17440 logs.go:284] No container was found matching "etcd"
I1229 06:58:51.808570 17440 cri.go:61] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I1229 06:58:51.808625 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=coredns
I1229 06:58:51.841860 17440 cri.go:96] found id: ""
I1229 06:58:51.841887 17440 logs.go:282] 0 containers: []
W1229 06:58:51.841894 17440 logs.go:284] No container was found matching "coredns"
I1229 06:58:51.841900 17440 cri.go:61] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I1229 06:58:51.841955 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-scheduler
I1229 06:58:51.875485 17440 cri.go:96] found id: ""
I1229 06:58:51.875512 17440 logs.go:282] 0 containers: []
W1229 06:58:51.875520 17440 logs.go:284] No container was found matching "kube-scheduler"
I1229 06:58:51.875526 17440 cri.go:61] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I1229 06:58:51.875576 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-proxy
I1229 06:58:51.909661 17440 cri.go:96] found id: ""
I1229 06:58:51.909699 17440 logs.go:282] 0 containers: []
W1229 06:58:51.909712 17440 logs.go:284] No container was found matching "kube-proxy"
I1229 06:58:51.909720 17440 cri.go:61] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I1229 06:58:51.909790 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-controller-manager
I1229 06:58:51.943557 17440 cri.go:96] found id: ""
I1229 06:58:51.943594 17440 logs.go:282] 0 containers: []
W1229 06:58:51.943607 17440 logs.go:284] No container was found matching "kube-controller-manager"
I1229 06:58:51.943616 17440 cri.go:61] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I1229 06:58:51.943685 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kindnet
I1229 06:58:51.979189 17440 cri.go:96] found id: ""
I1229 06:58:51.979219 17440 logs.go:282] 0 containers: []
W1229 06:58:51.979228 17440 logs.go:284] No container was found matching "kindnet"
I1229 06:58:51.979234 17440 cri.go:61] listing CRI containers in root : {State:all Name:storage-provisioner Namespaces:[]}
I1229 06:58:51.979285 17440 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=storage-provisioner
I1229 06:58:52.013436 17440 cri.go:96] found id: ""
I1229 06:58:52.013472 17440 logs.go:282] 0 containers: []
W1229 06:58:52.013482 17440 logs.go:284] No container was found matching "storage-provisioner"
I1229 06:58:52.013494 17440 logs.go:123] Gathering logs for kubelet ...
I1229 06:58:52.013507 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I1229 06:58:52.030384 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.141703 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:58:52.030429 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.164789 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:58:52.030454 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: I1229 06:53:48.190793 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:58:52.030481 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202029 2634 kuberuntime_manager.go:1961] "PodSandboxStatus of sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd" pod="kube-system/etcd-functional-695625"
I1229 06:58:52.030506 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202077 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:58:52.030530 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202095 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/etcd-functional-695625"
I1229 06:58:52.030550 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202348 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:58:52.030574 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202382 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:58:52.030601 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202394 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-proxy-g7lp9"
I1229 06:58:52.030643 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202436 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-proxy-g7lp9_kube-system(9c2c2ac1-7fa0-427d-b78e-ee14e169895a)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-proxy-g7lp9" podUID="9c2c2ac1-7fa0-427d-b78e-ee14e169895a"
I1229 06:58:52.030670 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202695 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:58:52.030694 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202717 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"}
I1229 06:58:52.030721 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202737 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:58:52.030757 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202753 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"b5f201ca-6d54-4e15-9584-396fb1486f3c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/storage-provisioner" podUID="b5f201ca-6d54-4e15-9584-396fb1486f3c"
I1229 06:58:52.030787 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202781 2634 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF"
I1229 06:58:52.030826 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202797 2634 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:58:52.030853 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.202829 2634 kuberuntime_manager.go:1558] "CreatePodSandbox for pod failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" pod="kube-system/kube-apiserver-functional-695625"
I1229 06:58:52.030893 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203153 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\\\": rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.030921 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203201 2634 log.go:32] "StopPodSandbox from runtime service failed" err="rpc error: code = Unavailable desc = error reading from server: EOF" podSandboxID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:58:52.030943 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203215 2634 kuberuntime_manager.go:1881] "Failed to stop sandbox" podSandboxID={"Type":"docker","ID":"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"}
I1229 06:58:52.030981 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203229 2634 kuberuntime_manager.go:1422] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\""
I1229 06:58:52.031015 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.203240 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"00a95e37-1394-45a7-a376-b195e31e3e9c\" with KillPodSandboxError: \"rpc error: code = Unavailable desc = error reading from server: EOF\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:58:52.031053 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.205108 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer"
I1229 06:58:52.031087 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205291 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"ebc0dd45a3bf1e20d1e524935fd6129c\"}"
I1229 06:58:52.031117 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205358 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="ebc0dd45a3bf1e20d1e524935fd6129c"
I1229 06:58:52.031146 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205374 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:58:52.031189 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205391 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-scheduler-functional-695625"
I1229 06:58:52.031223 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205442 2634 log.go:32] "ListPodSandbox with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" filter="label_selector:{key:\"io.kubernetes.pod.uid\" value:\"5079d003096e0cf8214852718da6832c\"}"
I1229 06:58:52.031253 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205472 2634 kuberuntime_sandbox.go:351] "Failed to list sandboxes for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:58:52.031281 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205487 2634 generic.go:455] "PLEG: Write status" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.031311 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: E1229 06:53:48.205502 2634 generic.go:300] "PLEG: Ignoring events for pod" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: read unix @->/run/cri-dockerd.sock: read: connection reset by peer\"" pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.031347 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.306369 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:58:52.031383 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.465709 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:58:52.031422 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 kubelet[2634]: W1229 06:53:48.727775 2634 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "/var/run/cri-dockerd.sock", ServerName: "localhost", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial unix /var/run/cri-dockerd.sock: connect: connection refused"
I1229 06:58:52.031445 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.224724 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:58:52.031467 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.225054 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.031491 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.239349 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:58:52.031516 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.239613 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.031538 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.260924 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:58:52.031562 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.262706 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.031584 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: I1229 06:53:49.271403 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:58:52.031606 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 kubelet[2634]: E1229 06:53:49.272071 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.031628 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.486082 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.031651 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.527267 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.031673 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.585714 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.031695 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 kubelet[2634]: E1229 06:53:50.682419 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:58:52.031717 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 kubelet[2634]: E1229 06:53:51.994421 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.031738 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.009282 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.031763 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.028514 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.031786 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: I1229 06:53:52.059063 2634 scope.go:122] "RemoveContainer" containerID="4b032678478a0db80f17dd1d989d5d3ad03f5c19d261d887ee8bbc80c0ef716c"
I1229 06:58:52.031824 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.061268 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.031855 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.136206 2634 controller.go:251] "Failed to update lease" err="Put \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:58:52.031894 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.348866 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:58:52.031949 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 kubelet[2634]: E1229 06:53:52.420977 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.031981 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.083455 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:58:52.032005 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099631 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:58:52.032025 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.099665 2634 scope.go:122] "RemoveContainer" containerID="14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697"
I1229 06:58:52.032048 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.099823 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.032069 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.114949 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:58:52.032093 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115125 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.032112 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.115147 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:58:52.032150 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.115570 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:58:52.032170 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128002 2634 scope.go:122] "RemoveContainer" containerID="abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32"
I1229 06:58:52.032192 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128620 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:58:52.032214 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.128846 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.032234 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.128862 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:58:52.032269 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.129184 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.032290 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.146245 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:58:52.032314 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.149274 2634 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:58:52.032335 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: I1229 06:53:53.158968 2634 scope.go:122] "RemoveContainer" containerID="bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159"
I1229 06:58:52.032371 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 kubelet[2634]: E1229 06:53:53.483523 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:58:52.032395 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.165031 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.032414 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.165425 2634 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:58:52.032452 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.166088 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-functional-695625_kube-system(5079d003096e0cf8214852718da6832c)\"" pod="kube-system/kube-controller-manager-functional-695625" podUID="5079d003096e0cf8214852718da6832c"
I1229 06:58:52.032473 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.177787 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:58:52.032495 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.177811 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:58:52.032530 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.178010 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:58:52.032552 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190233 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.032573 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: I1229 06:53:54.190259 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:58:52.032608 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190388 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.032631 17440 command_runner.go:130] > Dec 29 06:53:54 functional-695625 kubelet[2634]: E1229 06:53:54.190596 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.032655 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.197650 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.032676 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198541 2634 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-wfq7m" containerName="coredns"
I1229 06:58:52.032696 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: I1229 06:53:55.198579 2634 scope.go:122] "RemoveContainer" containerID="6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:58:52.032735 17440 command_runner.go:130] > Dec 29 06:53:55 functional-695625 kubelet[2634]: E1229 06:53:55.198854 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 10s restarting failed container=coredns pod=coredns-7d764666f9-wfq7m_kube-system(00a95e37-1394-45a7-a376-b195e31e3e9c)\"" pod="kube-system/coredns-7d764666f9-wfq7m" podUID="00a95e37-1394-45a7-a376-b195e31e3e9c"
I1229 06:58:52.032819 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.114313 2634 kubelet_node_status.go:474] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-29T06:53:48Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"containerRuntimeVersion\\\":\\\"docker://28.5.2\\\"}}}\" for node \"functional-695625\": Patch \"https://192.168.39.121:8441/api/v1/nodes/functional-695625/status?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
I1229 06:58:52.032845 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.650698 2634 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.032864 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.650771 2634 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:58:52.032899 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: E1229 06:53:58.651066 2634 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.032919 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 kubelet[2634]: I1229 06:53:58.808551 2634 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:58:52.032935 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
I1229 06:58:52.032948 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
I1229 06:58:52.032960 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
I1229 06:58:52.032981 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 systemd[1]: kubelet.service: Consumed 2.468s CPU time, 33.6M memory peak.
I1229 06:58:52.032995 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 systemd[1]: Started kubelet: The Kubernetes Node Agent.
I1229 06:58:52.033012 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045324 6517 server.go:525] "Kubelet version" kubeletVersion="v1.35.0"
I1229 06:58:52.033029 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045661 6517 server.go:527] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1229 06:58:52.033042 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045707 6517 watchdog_linux.go:95] "Systemd watchdog is not enabled"
I1229 06:58:52.033062 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.045732 6517 watchdog_linux.go:138] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started."
I1229 06:58:52.033080 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.046147 6517 server.go:951] "Client rotation is on, will bootstrap in background"
I1229 06:58:52.033101 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.047668 6517 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem"
I1229 06:58:52.033120 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.050807 6517 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1229 06:58:52.033138 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.062385 6517 server.go:1418] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
I1229 06:58:52.033166 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066238 6517 server.go:775] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /"
I1229 06:58:52.033187 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066279 6517 server.go:836] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false
I1229 06:58:52.033206 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066453 6517 container_manager_linux.go:272] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
I1229 06:58:52.033274 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066538 6517 container_manager_linux.go:277] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"functional-695625","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
I1229 06:58:52.033294 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066683 6517 topology_manager.go:143] "Creating topology manager with none policy"
I1229 06:58:52.033309 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066691 6517 container_manager_linux.go:308] "Creating device plugin manager"
I1229 06:58:52.033326 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066715 6517 container_manager_linux.go:317] "Creating Dynamic Resource Allocation (DRA) manager"
I1229 06:58:52.033343 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.066977 6517 state_mem.go:41] "Initialized" logger="CPUManager state memory"
I1229 06:58:52.033359 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067273 6517 kubelet.go:482] "Attempting to sync node with API server"
I1229 06:58:52.033378 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067303 6517 kubelet.go:383] "Adding static pod path" path="/etc/kubernetes/manifests"
I1229 06:58:52.033398 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067321 6517 kubelet.go:394] "Adding apiserver pod source"
I1229 06:58:52.033413 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.067339 6517 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
I1229 06:58:52.033431 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.071645 6517 kuberuntime_manager.go:294] "Container runtime initialized" containerRuntime="docker" version="28.5.2" apiVersion="v1"
I1229 06:58:52.033453 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072532 6517 kubelet.go:943] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled"
I1229 06:58:52.033476 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.072614 6517 kubelet.go:970] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled"
I1229 06:58:52.033492 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.079617 6517 server.go:1257] "Started kubelet"
I1229 06:58:52.033507 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.087576 6517 fs_resource_analyzer.go:69] "Starting FS ResourceAnalyzer"
I1229 06:58:52.033526 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.098777 6517 scope.go:122] "RemoveContainer" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:58:52.033542 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.106373 6517 server.go:182] "Starting to listen" address="0.0.0.0" port=10250
I1229 06:58:52.033559 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.109848 6517 server.go:317] "Adding debug handlers to kubelet server"
I1229 06:58:52.033609 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117444 6517 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
I1229 06:58:52.033625 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117555 6517 server_v1.go:49] "podresources" method="list" useActivePods=true
I1229 06:58:52.033642 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.117716 6517 server.go:254] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
I1229 06:58:52.033665 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.118699 6517 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key"
I1229 06:58:52.033681 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119167 6517 volume_manager.go:311] "Starting Kubelet Volume Manager"
I1229 06:58:52.033700 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.119433 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:58:52.033718 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.119972 6517 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
I1229 06:58:52.033734 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.120370 6517 reconciler.go:29] "Reconciler: start to sync state"
I1229 06:58:52.033751 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.130418 6517 factory.go:223] Registration of the systemd container factory successfully
I1229 06:58:52.033776 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.131188 6517 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory
I1229 06:58:52.033808 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.141029 6517 factory.go:223] Registration of the containerd container factory successfully
I1229 06:58:52.033826 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183036 6517 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4"
I1229 06:58:52.033840 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183220 6517 status_manager.go:249] "Starting to sync pod status with apiserver"
I1229 06:58:52.033855 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.183330 6517 kubelet.go:2501] "Starting kubelet main sync loop"
I1229 06:58:52.033878 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.183444 6517 kubelet.go:2525] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
I1229 06:58:52.033905 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.221428 6517 kubelet_node_status.go:392] "Error getting the current node from lister" err="node \"functional-695625\" not found"
I1229 06:58:52.033937 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.242700 6517 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd" containerID="fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:58:52.033974 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.243294 6517 image_gc_manager.go:222] "Failed to monitor images" err="get container status: runtime container status: rpc error: code = Unknown desc = Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:58:52.033993 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269160 6517 cpu_manager.go:225] "Starting" policy="none"
I1229 06:58:52.034010 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269189 6517 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
I1229 06:58:52.034030 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269211 6517 state_mem.go:41] "Initialized" logger="CPUManager state checkpoint.CPUManager state memory"
I1229 06:58:52.034050 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269368 6517 state_mem.go:94] "Updated default CPUSet" logger="CPUManager state checkpoint.CPUManager state memory" cpuSet=""
I1229 06:58:52.034084 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269407 6517 state_mem.go:102] "Updated CPUSet assignments" logger="CPUManager state checkpoint.CPUManager state memory" assignments={}
I1229 06:58:52.034099 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269429 6517 policy_none.go:50] "Start"
I1229 06:58:52.034116 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269440 6517 memory_manager.go:187] "Starting memorymanager" policy="None"
I1229 06:58:52.034134 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269450 6517 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint"
I1229 06:58:52.034152 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.269563 6517 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint"
I1229 06:58:52.034167 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.270193 6517 policy_none.go:44] "Start"
I1229 06:58:52.034186 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.280697 6517 manager.go:525] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint"
I1229 06:58:52.034203 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282209 6517 eviction_manager.go:194] "Eviction manager: starting control loop"
I1229 06:58:52.034224 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282399 6517 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
I1229 06:58:52.034241 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.282694 6517 plugin_manager.go:121] "Starting Kubelet Plugin Manager"
I1229 06:58:52.034265 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.285700 6517 eviction_manager.go:272] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime"
I1229 06:58:52.034286 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.286000 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.034308 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.290189 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.034332 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.296210 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.034358 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296213 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8"
I1229 06:58:52.034380 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296423 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6"
I1229 06:58:52.034404 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296509 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd"
I1229 06:58:52.034427 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296522 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd"
I1229 06:58:52.034450 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296659 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3"
I1229 06:58:52.034472 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.296736 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7"
I1229 06:58:52.034499 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.298291 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.034521 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.300783 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c"
I1229 06:58:52.034544 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.307864 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1"
I1229 06:58:52.034566 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327004 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784"
I1229 06:58:52.034588 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.327039 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780"
I1229 06:58:52.034611 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.337430 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd"
I1229 06:58:52.034633 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338584 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3cc8048f6d9ff1df7ba90196f828ce8838881d8a6049d1e2f085d13b40a3a71"
I1229 06:58:52.034655 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.338603 6517 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263"
I1229 06:58:52.034678 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: E1229 06:54:00.339318 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.034697 17440 command_runner.go:130] > Dec 29 06:54:00 functional-695625 kubelet[6517]: I1229 06:54:00.384315 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.034724 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.121079 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="200ms"
I1229 06:58:52.034749 17440 command_runner.go:130] > Dec 29 06:54:10 functional-695625 kubelet[6517]: E1229 06:54:10.286789 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.034771 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.288099 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.034819 17440 command_runner.go:130] > Dec 29 06:54:20 functional-695625 kubelet[6517]: E1229 06:54:20.322920 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="400ms"
I1229 06:58:52.034843 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.289381 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.034873 17440 command_runner.go:130] > Dec 29 06:54:30 functional-695625 kubelet[6517]: E1229 06:54:30.724518 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="800ms"
I1229 06:58:52.034936 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.088119 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bc22bb49a default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,LastTimestamp:2025-12-29 06:54:00.079586458 +0000 UTC m=+0.095335847,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.034963 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: E1229 06:54:34.387607 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.034993 17440 command_runner.go:130] > Dec 29 06:54:34 functional-695625 kubelet[6517]: I1229 06:54:34.589687 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.035018 17440 command_runner.go:130] > Dec 29 06:54:40 functional-695625 kubelet[6517]: E1229 06:54:40.289653 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035049 17440 command_runner.go:130] > Dec 29 06:54:41 functional-695625 kubelet[6517]: E1229 06:54:41.525961 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="1.6s"
I1229 06:58:52.035071 17440 command_runner.go:130] > Dec 29 06:54:50 functional-695625 kubelet[6517]: E1229 06:54:50.290623 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035099 17440 command_runner.go:130] > Dec 29 06:54:53 functional-695625 kubelet[6517]: E1229 06:54:53.127043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="3.2s"
I1229 06:58:52.035126 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.123055 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:58:52.035159 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223407 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-ca-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:58:52.035194 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.223452 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-usr-share-ca-certificates\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:58:52.035228 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224254 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-flexvolume-dir\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.035263 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224286 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-k8s-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.035299 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224307 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/ebc0dd45a3bf1e20d1e524935fd6129c-kubeconfig\") pod \"kube-scheduler-functional-695625\" (UID: \"ebc0dd45a3bf1e20d1e524935fd6129c\") " pod="kube-system/kube-scheduler-functional-695625"
I1229 06:58:52.035333 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224328 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d173c000af26dcef62569d3a5345fcae-k8s-certs\") pod \"kube-apiserver-functional-695625\" (UID: \"d173c000af26dcef62569d3a5345fcae\") " pod="kube-system/kube-apiserver-functional-695625"
I1229 06:58:52.035368 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224346 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-ca-certs\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.035408 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224360 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-kubeconfig\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.035445 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224377 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/5079d003096e0cf8214852718da6832c-usr-share-ca-certificates\") pod \"kube-controller-manager-functional-695625\" (UID: \"5079d003096e0cf8214852718da6832c\") " pod="kube-system/kube-controller-manager-functional-695625"
I1229 06:58:52.035477 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224432 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-certs\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:58:52.035512 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: I1229 06:55:00.224449 6517 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/8da5c6c8980da2ca920a502b6f312384-etcd-data\") pod \"etcd-functional-695625\" (UID: \"8da5c6c8980da2ca920a502b6f312384\") " pod="kube-system/etcd-functional-695625"
I1229 06:58:52.035534 17440 command_runner.go:130] > Dec 29 06:55:00 functional-695625 kubelet[6517]: E1229 06:55:00.291332 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035563 17440 command_runner.go:130] > Dec 29 06:55:06 functional-695625 kubelet[6517]: E1229 06:55:06.329330 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="the server was unable to return a response in the time allotted, but may still be processing the request (get leases.coordination.k8s.io functional-695625)" interval="6.4s"
I1229 06:58:52.035631 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.090561 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.035658 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: E1229 06:55:08.592540 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.035677 17440 command_runner.go:130] > Dec 29 06:55:08 functional-695625 kubelet[6517]: I1229 06:55:08.994308 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.035699 17440 command_runner.go:130] > Dec 29 06:55:10 functional-695625 kubelet[6517]: E1229 06:55:10.291711 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035720 17440 command_runner.go:130] > Dec 29 06:55:20 functional-695625 kubelet[6517]: E1229 06:55:20.292793 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035749 17440 command_runner.go:130] > Dec 29 06:55:22 functional-695625 kubelet[6517]: E1229 06:55:22.729733 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:58:52.035771 17440 command_runner.go:130] > Dec 29 06:55:30 functional-695625 kubelet[6517]: E1229 06:55:30.293859 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035814 17440 command_runner.go:130] > Dec 29 06:55:39 functional-695625 kubelet[6517]: E1229 06:55:39.730496 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:58:52.035838 17440 command_runner.go:130] > Dec 29 06:55:40 functional-695625 kubelet[6517]: E1229 06:55:40.294978 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.035902 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.093022 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc794297 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node functional-695625 status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,LastTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.035927 17440 command_runner.go:130] > Dec 29 06:55:42 functional-695625 kubelet[6517]: E1229 06:55:42.996721 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.035947 17440 command_runner.go:130] > Dec 29 06:55:43 functional-695625 kubelet[6517]: I1229 06:55:43.798535 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.035978 17440 command_runner.go:130] > Dec 29 06:55:50 functional-695625 kubelet[6517]: E1229 06:55:50.295990 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.036010 17440 command_runner.go:130] > Dec 29 06:55:56 functional-695625 kubelet[6517]: E1229 06:55:56.732252 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.036038 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.228455 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:58:52.036061 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: E1229 06:56:00.296294 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.036082 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.339811 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:58:52.036102 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.590728 6517 scope.go:122] "RemoveContainer" containerID="d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974"
I1229 06:58:52.036121 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 kubelet[6517]: I1229 06:56:00.596576 6517 scope.go:122] "RemoveContainer" containerID="17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:58:52.036141 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.196928 6517 scope.go:122] "RemoveContainer" containerID="fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:58:52.036165 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199564 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036190 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199638 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.036212 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: I1229 06:56:01.199656 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.036251 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.199813 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.036275 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.211732 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036299 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.212086 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.036323 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226269 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036345 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226760 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.036369 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226846 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036393 17440 command_runner.go:130] > Dec 29 06:56:01 functional-695625 kubelet[6517]: E1229 06:56:01.226932 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.036418 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240397 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036441 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.036464 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240759 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036488 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.240798 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.036511 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241099 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036536 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241133 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.036561 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241440 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036584 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241482 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.036606 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: I1229 06:56:02.241498 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.036642 17440 command_runner.go:130] > Dec 29 06:56:02 functional-695625 kubelet[6517]: E1229 06:56:02.241585 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.036664 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246390 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036687 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246454 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.036711 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246667 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036734 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246717 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.036754 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: I1229 06:56:03.246732 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.036806 17440 command_runner.go:130] > Dec 29 06:56:03 functional-695625 kubelet[6517]: E1229 06:56:03.246832 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.036895 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.297136 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.036922 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342375 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.036945 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342456 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.036973 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: I1229 06:56:10.342477 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.037009 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.342670 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.037032 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593708 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037052 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.593770 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.037076 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598591 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037098 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.598652 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.037122 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606502 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037144 17440 command_runner.go:130] > Dec 29 06:56:10 functional-695625 kubelet[6517]: E1229 06:56:10.606600 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.037168 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302101 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037189 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302675 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.037212 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302176 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037235 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302763 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.037254 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: I1229 06:56:11.302780 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.037278 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302307 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037303 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 kubelet[6517]: E1229 06:56:11.302816 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.037325 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.321043 6517 scope.go:122] "RemoveContainer" containerID="78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac"
I1229 06:58:52.037348 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.321965 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037372 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322030 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.037392 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: I1229 06:56:12.322044 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.037424 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.322163 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.037449 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323008 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037472 17440 command_runner.go:130] > Dec 29 06:56:12 functional-695625 kubelet[6517]: E1229 06:56:12.323148 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.037497 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336021 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037518 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336097 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.037539 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: I1229 06:56:13.336114 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.037574 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.336243 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.037604 17440 command_runner.go:130] > Dec 29 06:56:13 functional-695625 kubelet[6517]: E1229 06:56:13.733654 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.037669 17440 command_runner.go:130] > Dec 29 06:56:16 functional-695625 kubelet[6517]: E1229 06:56:16.095560 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc7b9ae7 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node functional-695625 status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,LastTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.037694 17440 command_runner.go:130] > Dec 29 06:56:17 functional-695625 kubelet[6517]: E1229 06:56:17.801052 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.037713 17440 command_runner.go:130] > Dec 29 06:56:19 functional-695625 kubelet[6517]: I1229 06:56:19.403026 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.037734 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.297746 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.037760 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342467 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037784 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342554 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.037816 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: I1229 06:56:20.342589 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.037851 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.342829 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.037875 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.385984 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.037897 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.386062 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.037917 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: I1229 06:56:20.386078 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.037950 17440 command_runner.go:130] > Dec 29 06:56:20 functional-695625 kubelet[6517]: E1229 06:56:20.386220 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.037981 17440 command_runner.go:130] > Dec 29 06:56:30 functional-695625 kubelet[6517]: E1229 06:56:30.298955 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038011 17440 command_runner.go:130] > Dec 29 06:56:30 functional-695625 kubelet[6517]: E1229 06:56:30.734998 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.038035 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.185639 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038059 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.185732 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038079 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: I1229 06:56:32.185750 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.038102 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493651 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038125 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493733 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038147 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: I1229 06:56:32.493755 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.038182 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 kubelet[6517]: E1229 06:56:32.493996 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.038203 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: I1229 06:56:33.510294 6517 scope.go:122] "RemoveContainer" containerID="18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb"
I1229 06:58:52.038223 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511464 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038243 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511520 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038260 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: I1229 06:56:33.511535 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.038297 17440 command_runner.go:130] > Dec 29 06:56:33 functional-695625 kubelet[6517]: E1229 06:56:33.511684 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.038321 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525404 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038344 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525467 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038365 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: I1229 06:56:34.525482 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.038401 17440 command_runner.go:130] > Dec 29 06:56:34 functional-695625 kubelet[6517]: E1229 06:56:34.525663 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.038423 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.300040 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038449 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342011 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038471 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342082 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038491 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: I1229 06:56:40.342099 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.038526 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.342223 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.038549 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.567456 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.038585 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.567665 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.038608 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: I1229 06:56:40.567686 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.038643 17440 command_runner.go:130] > Dec 29 06:56:40 functional-695625 kubelet[6517]: E1229 06:56:40.568152 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.038670 17440 command_runner.go:130] > Dec 29 06:56:47 functional-695625 kubelet[6517]: E1229 06:56:47.736964 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:58:52.038735 17440 command_runner.go:130] > Dec 29 06:56:50 functional-695625 kubelet[6517]: E1229 06:56:50.098168 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.27202431 +0000 UTC m=+0.287773690,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.038758 17440 command_runner.go:130] > Dec 29 06:56:50 functional-695625 kubelet[6517]: E1229 06:56:50.300747 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038785 17440 command_runner.go:130] > Dec 29 06:56:53 functional-695625 kubelet[6517]: E1229 06:56:53.405155 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.038817 17440 command_runner.go:130] > Dec 29 06:56:56 functional-695625 kubelet[6517]: I1229 06:56:56.606176 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.038842 17440 command_runner.go:130] > Dec 29 06:57:00 functional-695625 kubelet[6517]: E1229 06:57:00.301915 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038869 17440 command_runner.go:130] > Dec 29 06:57:00 functional-695625 kubelet[6517]: E1229 06:57:00.330173 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:58:52.038900 17440 command_runner.go:130] > Dec 29 06:57:04 functional-695625 kubelet[6517]: E1229 06:57:04.738681 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.038922 17440 command_runner.go:130] > Dec 29 06:57:10 functional-695625 kubelet[6517]: E1229 06:57:10.302083 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038946 17440 command_runner.go:130] > Dec 29 06:57:20 functional-695625 kubelet[6517]: E1229 06:57:20.302612 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.038977 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.185645 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039003 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.185704 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.039034 17440 command_runner.go:130] > Dec 29 06:57:21 functional-695625 kubelet[6517]: E1229 06:57:21.740062 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.039059 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.185952 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039082 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.186017 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.039102 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.186034 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.039126 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.873051 6517 scope.go:122] "RemoveContainer" containerID="0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec"
I1229 06:58:52.039149 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874264 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039171 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874357 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.039191 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: I1229 06:57:22.874375 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.039227 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 kubelet[6517]: E1229 06:57:22.874499 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.039252 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892021 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039275 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892083 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.039295 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: I1229 06:57:23.892098 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.039330 17440 command_runner.go:130] > Dec 29 06:57:23 functional-695625 kubelet[6517]: E1229 06:57:23.892218 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.039396 17440 command_runner.go:130] > Dec 29 06:57:24 functional-695625 kubelet[6517]: E1229 06:57:24.100978 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc794297 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node functional-695625 status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252441239 +0000 UTC m=+0.268190608,LastTimestamp:2025-12-29 06:54:00.27223373 +0000 UTC m=+0.287983111,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.039419 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: E1229 06:57:30.302837 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.039444 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: E1229 06:57:30.341968 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039468 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: E1229 06:57:30.342033 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.039488 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: I1229 06:57:30.342050 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.039523 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: E1229 06:57:30.342233 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.039550 17440 command_runner.go:130] > Dec 29 06:57:30 functional-695625 kubelet[6517]: E1229 06:57:30.608375 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.039576 17440 command_runner.go:130] > Dec 29 06:57:32 functional-695625 kubelet[6517]: E1229 06:57:32.186377 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039598 17440 command_runner.go:130] > Dec 29 06:57:32 functional-695625 kubelet[6517]: E1229 06:57:32.186459 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.039675 17440 command_runner.go:130] > Dec 29 06:57:33 functional-695625 kubelet[6517]: E1229 06:57:33.188187 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.039700 17440 command_runner.go:130] > Dec 29 06:57:33 functional-695625 kubelet[6517]: E1229 06:57:33.188267 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-695625" containerName="kube-controller-manager"
I1229 06:58:52.039715 17440 command_runner.go:130] > Dec 29 06:57:37 functional-695625 kubelet[6517]: I1229 06:57:37.010219 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.039749 17440 command_runner.go:130] > Dec 29 06:57:38 functional-695625 kubelet[6517]: E1229 06:57:38.741770 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.039773 17440 command_runner.go:130] > Dec 29 06:57:40 functional-695625 kubelet[6517]: E1229 06:57:40.303258 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.039808 17440 command_runner.go:130] > Dec 29 06:57:50 functional-695625 kubelet[6517]: E1229 06:57:50.304120 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.039837 17440 command_runner.go:130] > Dec 29 06:57:55 functional-695625 kubelet[6517]: E1229 06:57:55.743031 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": context deadline exceeded" interval="7s"
I1229 06:58:52.039903 17440 command_runner.go:130] > Dec 29 06:57:58 functional-695625 kubelet[6517]: E1229 06:57:58.103052 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc7b9ae7 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node functional-695625 status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252594919 +0000 UTC m=+0.268344289,LastTimestamp:2025-12-29 06:54:00.272240811 +0000 UTC m=+0.287990191,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.039929 17440 command_runner.go:130] > Dec 29 06:58:00 functional-695625 kubelet[6517]: E1229 06:58:00.304627 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.039954 17440 command_runner.go:130] > Dec 29 06:58:00 functional-695625 kubelet[6517]: E1229 06:58:00.432518 6517 reconstruct.go:188] "Failed to get Node status to reconstruct device paths" err="the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)"
I1229 06:58:52.039991 17440 command_runner.go:130] > Dec 29 06:58:00 functional-695625 kubelet[6517]: E1229 06:58:00.432667 6517 csi_plugin.go:399] Failed to initialize CSINode: error updating CSINode annotation: timed out waiting for the condition; caused by: the server was unable to return a response in the time allotted, but may still be processing the request (get nodes functional-695625)
I1229 06:58:52.040014 17440 command_runner.go:130] > Dec 29 06:58:10 functional-695625 kubelet[6517]: E1229 06:58:10.305485 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.040037 17440 command_runner.go:130] > Dec 29 06:58:11 functional-695625 kubelet[6517]: E1229 06:58:11.012407 6517 kubelet_node_status.go:106] "Unable to register node with API server" err="Timeout: request did not complete within requested timeout - context deadline exceeded" node="functional-695625"
I1229 06:58:52.040068 17440 command_runner.go:130] > Dec 29 06:58:12 functional-695625 kubelet[6517]: E1229 06:58:12.743824 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.040086 17440 command_runner.go:130] > Dec 29 06:58:18 functional-695625 kubelet[6517]: I1229 06:58:18.014210 6517 kubelet_node_status.go:74] "Attempting to register node" node="functional-695625"
I1229 06:58:52.040107 17440 command_runner.go:130] > Dec 29 06:58:20 functional-695625 kubelet[6517]: E1229 06:58:20.306630 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.040127 17440 command_runner.go:130] > Dec 29 06:58:24 functional-695625 kubelet[6517]: E1229 06:58:24.186554 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040149 17440 command_runner.go:130] > Dec 29 06:58:24 functional-695625 kubelet[6517]: E1229 06:58:24.186719 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-695625" containerName="kube-scheduler"
I1229 06:58:52.040176 17440 command_runner.go:130] > Dec 29 06:58:29 functional-695625 kubelet[6517]: E1229 06:58:29.745697 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.040195 17440 command_runner.go:130] > Dec 29 06:58:30 functional-695625 kubelet[6517]: E1229 06:58:30.307319 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.040256 17440 command_runner.go:130] > Dec 29 06:58:32 functional-695625 kubelet[6517]: E1229 06:58:32.105206 6517 event.go:359] "Server rejected event (will not retry!)" err="Timeout: request did not complete within requested timeout - context deadline exceeded" event="&Event{ObjectMeta:{functional-695625.18859d2bcc791058 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:functional-695625,UID:,APIVersion:v1,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node functional-695625 status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:functional-695625,},FirstTimestamp:2025-12-29 06:54:00.252428376 +0000 UTC m=+0.268177748,LastTimestamp:2025-12-29 06:54:00.286010652 +0000 UTC m=+0.301760032,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:functional-695625,}"
I1229 06:58:52.040279 17440 command_runner.go:130] > Dec 29 06:58:39 functional-695625 kubelet[6517]: E1229 06:58:39.184790 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040300 17440 command_runner.go:130] > Dec 29 06:58:39 functional-695625 kubelet[6517]: E1229 06:58:39.184918 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.040319 17440 command_runner.go:130] > Dec 29 06:58:39 functional-695625 kubelet[6517]: I1229 06:58:39.184949 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.040354 17440 command_runner.go:130] > Dec 29 06:58:39 functional-695625 kubelet[6517]: E1229 06:58:39.185100 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.040377 17440 command_runner.go:130] > Dec 29 06:58:40 functional-695625 kubelet[6517]: E1229 06:58:40.184709 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040397 17440 command_runner.go:130] > Dec 29 06:58:40 functional-695625 kubelet[6517]: E1229 06:58:40.184771 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
I1229 06:58:52.040413 17440 command_runner.go:130] > Dec 29 06:58:40 functional-695625 kubelet[6517]: E1229 06:58:40.308010 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
I1229 06:58:52.040433 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.185947 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040455 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.186016 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.040477 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: I1229 06:58:43.186033 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.040498 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.503148 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040520 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.503225 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.040538 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: I1229 06:58:43.503241 6517 scope.go:122] "RemoveContainer" containerID="07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817"
I1229 06:58:52.040576 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.503356 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.040596 17440 command_runner.go:130] > Dec 29 06:58:44 functional-695625 kubelet[6517]: I1229 06:58:44.519860 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
I1229 06:58:52.040619 17440 command_runner.go:130] > Dec 29 06:58:44 functional-695625 kubelet[6517]: E1229 06:58:44.520985 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040640 17440 command_runner.go:130] > Dec 29 06:58:44 functional-695625 kubelet[6517]: E1229 06:58:44.521063 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.040658 17440 command_runner.go:130] > Dec 29 06:58:44 functional-695625 kubelet[6517]: I1229 06:58:44.521079 6517 scope.go:122] "RemoveContainer" containerID="07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817"
I1229 06:58:52.040692 17440 command_runner.go:130] > Dec 29 06:58:44 functional-695625 kubelet[6517]: E1229 06:58:44.521196 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.040711 17440 command_runner.go:130] > Dec 29 06:58:45 functional-695625 kubelet[6517]: E1229 06:58:45.537487 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
I1229 06:58:52.040729 17440 command_runner.go:130] > Dec 29 06:58:45 functional-695625 kubelet[6517]: E1229 06:58:45.537563 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
I1229 06:58:52.040741 17440 command_runner.go:130] > Dec 29 06:58:45 functional-695625 kubelet[6517]: I1229 06:58:45.537579 6517 scope.go:122] "RemoveContainer" containerID="07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817"
I1229 06:58:52.040764 17440 command_runner.go:130] > Dec 29 06:58:45 functional-695625 kubelet[6517]: E1229 06:58:45.537686 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
I1229 06:58:52.040784 17440 command_runner.go:130] > Dec 29 06:58:46 functional-695625 kubelet[6517]: E1229 06:58:46.747043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
I1229 06:58:52.040807 17440 command_runner.go:130] > Dec 29 06:58:49 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
I1229 06:58:52.040815 17440 command_runner.go:130] > Dec 29 06:58:49 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
I1229 06:58:52.040821 17440 command_runner.go:130] > Dec 29 06:58:49 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
I1229 06:58:52.040830 17440 command_runner.go:130] > Dec 29 06:58:49 functional-695625 systemd[1]: kubelet.service: Consumed 4.400s CPU time, 30.3M memory peak.
I1229 06:58:52.093067 17440 logs.go:123] Gathering logs for dmesg ...
I1229 06:58:52.093106 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1229 06:58:52.108863 17440 command_runner.go:130] > [Dec29 06:52] Booted with the nomodeset parameter. Only the system framebuffer will be available
I1229 06:58:52.108898 17440 command_runner.go:130] > [ +0.000007] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
I1229 06:58:52.108912 17440 command_runner.go:130] > [ +0.000088] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
I1229 06:58:52.108925 17440 command_runner.go:130] > [ +0.005634] (rpcbind)[119]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
I1229 06:58:52.108937 17440 command_runner.go:130] > [ +0.202886] crun[405]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set
I1229 06:58:52.108945 17440 command_runner.go:130] > [ +0.971059] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
I1229 06:58:52.108951 17440 command_runner.go:130] > [ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
I1229 06:58:52.108957 17440 command_runner.go:130] > [ +0.000001] NFSD: Unable to initialize client recovery tracking! (-2)
I1229 06:58:52.108962 17440 command_runner.go:130] > [ +0.268875] kauditd_printk_skb: 32 callbacks suppressed
I1229 06:58:52.108971 17440 command_runner.go:130] > [ +0.123569] kauditd_printk_skb: 342 callbacks suppressed
I1229 06:58:52.108975 17440 command_runner.go:130] > [ +0.099711] kauditd_printk_skb: 205 callbacks suppressed
I1229 06:58:52.108980 17440 command_runner.go:130] > [ +0.170782] kauditd_printk_skb: 221 callbacks suppressed
I1229 06:58:52.108992 17440 command_runner.go:130] > [ +0.199839] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:58:52.108997 17440 command_runner.go:130] > [ +0.025660] kauditd_printk_skb: 318 callbacks suppressed
I1229 06:58:52.109006 17440 command_runner.go:130] > [Dec29 06:53] kauditd_printk_skb: 19 callbacks suppressed
I1229 06:58:52.109011 17440 command_runner.go:130] > [ +15.204939] kauditd_printk_skb: 18 callbacks suppressed
I1229 06:58:52.109021 17440 command_runner.go:130] > [ +0.333829] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:58:52.109031 17440 command_runner.go:130] > [ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
I1229 06:58:52.109036 17440 command_runner.go:130] > [ +4.976278] kauditd_printk_skb: 42 callbacks suppressed
I1229 06:58:52.109043 17440 command_runner.go:130] > [ +0.830497] kauditd_printk_skb: 396 callbacks suppressed
I1229 06:58:52.109048 17440 command_runner.go:130] > [ +5.294312] kauditd_printk_skb: 231 callbacks suppressed
I1229 06:58:52.109055 17440 command_runner.go:130] > [Dec29 06:56] kauditd_printk_skb: 36 callbacks suppressed
I1229 06:58:52.109062 17440 command_runner.go:130] > [ +10.952068] kauditd_printk_skb: 66 callbacks suppressed
I1229 06:58:52.109067 17440 command_runner.go:130] > [ +20.880271] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:52.109072 17440 command_runner.go:130] > [Dec29 06:57] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:52.109080 17440 command_runner.go:130] > [Dec29 06:58] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:52.109088 17440 command_runner.go:130] > [ +6.672596] kauditd_printk_skb: 14 callbacks suppressed
I1229 06:58:52.109931 17440 logs.go:123] Gathering logs for describe nodes ...
I1229 06:58:52.109946 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1229 06:59:52.193646 17440 command_runner.go:130] ! Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
I1229 06:59:52.193695 17440 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": (1m0.083736259s)
W1229 06:59:52.193730 17440 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
output:
** stderr **
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
** /stderr **
I1229 06:59:52.193743 17440 logs.go:123] Gathering logs for Docker ...
I1229 06:59:52.193757 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I1229 06:59:52.211424 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:59:52.211464 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:59:52.211503 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:59:52.211519 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:59:52.211538 17440 command_runner.go:130] > Dec 29 06:52:21 minikube cri-dockerd[372]: time="2025-12-29T06:52:21Z" level=fatal msg="failed to get docker version from dockerd: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
I1229 06:59:52.211555 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Main process exited, code=exited, status=1/FAILURE
I1229 06:59:52.211569 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: cri-docker.service: Failed with result 'exit-code'.
I1229 06:59:52.211587 17440 command_runner.go:130] > Dec 29 06:52:21 minikube systemd[1]: Failed to start CRI Interface for Docker Application Container Engine.
I1229 06:59:52.211601 17440 command_runner.go:130] > Dec 29 06:52:22 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:59:52.211612 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.020462163Z" level=info msg="Starting up"
I1229 06:59:52.211630 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.027928346Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:59:52.211652 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028129610Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:59:52.211672 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.028144703Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:59:52.211696 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.043277940Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:59:52.211714 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.068992169Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:59:52.211730 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.093451498Z" level=info msg="Loading containers: start."
I1229 06:59:52.211773 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.245820420Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:59:52.211790 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.354124488Z" level=info msg="Loading containers: done."
I1229 06:59:52.211824 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.369556904Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:59:52.211841 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.370022229Z" level=info msg="Initializing buildkit"
I1229 06:59:52.211855 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: cri-docker.service: Scheduled restart job, restart counter is at 1.
I1229 06:59:52.211871 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:59:52.211884 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.429481151Z" level=info msg="Completed buildkit initialization"
I1229 06:59:52.211899 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437135480Z" level=info msg="Daemon has completed initialization"
I1229 06:59:52.211913 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437228150Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:59:52.211926 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437499736Z" level=info msg="API listen on /run/docker.sock"
I1229 06:59:52.211948 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 dockerd[618]: time="2025-12-29T06:52:23.437545942Z" level=info msg="API listen on [::]:2376"
I1229 06:59:52.211959 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:59:52.211970 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:59:52.211984 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:59:52.212011 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:59:52.212025 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:59:52.212039 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Hairpin mode is set to none"
I1229 06:59:52.212064 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Loaded network plugin cni"
I1229 06:59:52.212079 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:59:52.212093 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:59:52.212108 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:59:52.212125 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:59:52.212139 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 cri-dockerd[823]: time="2025-12-29T06:52:23Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:59:52.212152 17440 command_runner.go:130] > Dec 29 06:52:23 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:59:52.212172 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250034276Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:59:52.212192 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250065025Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:59:52.212215 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250432086Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:59:52.212237 17440 command_runner.go:130] > Dec 29 06:52:24 functional-695625 dockerd[618]: time="2025-12-29T06:52:24.250448972Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:59:52.212252 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:59:52.212266 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.004793725Z" level=info msg="Processing signal 'terminated'"
I1229 06:59:52.212285 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006609373Z" level=warning msg="Error while testing if containerd API is ready" error="Canceled: grpc: the client connection is closing"
I1229 06:59:52.212301 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[618]: time="2025-12-29T06:52:25.006865498Z" level=info msg="Daemon shutdown complete"
I1229 06:59:52.212316 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:59:52.212331 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:59:52.212341 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:59:52.212357 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.072059214Z" level=info msg="Starting up"
I1229 06:59:52.212372 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079212056Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:59:52.212392 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079317481Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:59:52.212423 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.079333267Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:59:52.212444 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.100712562Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:59:52.212461 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.111060819Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:59:52.212477 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.125644752Z" level=info msg="Loading containers: start."
I1229 06:59:52.212512 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.269806698Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:59:52.212529 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.403684326Z" level=info msg="Loading containers: done."
I1229 06:59:52.212547 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419740189Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:59:52.212562 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.419840379Z" level=info msg="Initializing buildkit"
I1229 06:59:52.212577 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.440865810Z" level=info msg="Completed buildkit initialization"
I1229 06:59:52.212594 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.450796825Z" level=info msg="Daemon has completed initialization"
I1229 06:59:52.212612 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451233366Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:59:52.212628 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451367379Z" level=info msg="API listen on /run/docker.sock"
I1229 06:59:52.212643 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 dockerd[1144]: time="2025-12-29T06:52:25.451393479Z" level=info msg="API listen on [::]:2376"
I1229 06:59:52.212656 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:59:52.212671 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:59:52.212684 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:59:52.212699 17440 command_runner.go:130] > Dec 29 06:52:25 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:59:52.212714 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:59:52.212732 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:59:52.212751 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:59:52.212767 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:59:52.212783 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:59:52.212808 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Loaded network plugin cni"
I1229 06:59:52.212827 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:59:52.212844 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:59:52.212864 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:59:52.212881 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:59:52.212899 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:26Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:59:52.212916 17440 command_runner.go:130] > Dec 29 06:52:26 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:59:52.212932 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:59:52.212949 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.014018901Z" level=info msg="Processing signal 'terminated'"
I1229 06:59:52.212974 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.015980570Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
I1229 06:59:52.212995 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1144]: time="2025-12-29T06:52:29.016658114Z" level=info msg="Daemon shutdown complete"
I1229 06:59:52.213006 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:59:52.213020 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:59:52.213033 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:59:52.213055 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.080172805Z" level=info msg="Starting up"
I1229 06:59:52.213073 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087153730Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:59:52.213094 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087606870Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:59:52.213115 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.087791007Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:59:52.213135 17440 command_runner.go:130] > Dec 29 06:52:29 functional-695625 dockerd[1647]: time="2025-12-29T06:52:29.102104328Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:59:52.213153 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.438808405Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:59:52.213169 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.712758412Z" level=info msg="Loading containers: start."
I1229 06:59:52.213204 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.850108278Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:59:52.213221 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.981771558Z" level=info msg="Loading containers: done."
I1229 06:59:52.213242 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997281457Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:59:52.213258 17440 command_runner.go:130] > Dec 29 06:52:30 functional-695625 dockerd[1647]: time="2025-12-29T06:52:30.997336373Z" level=info msg="Initializing buildkit"
I1229 06:59:52.213275 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.018270012Z" level=info msg="Completed buildkit initialization"
I1229 06:59:52.213291 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.027948102Z" level=info msg="Daemon has completed initialization"
I1229 06:59:52.213308 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028167710Z" level=info msg="API listen on /run/docker.sock"
I1229 06:59:52.213321 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028236879Z" level=info msg="API listen on [::]:2376"
I1229 06:59:52.213334 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 dockerd[1647]: time="2025-12-29T06:52:31.028260561Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:59:52.213348 17440 command_runner.go:130] > Dec 29 06:52:31 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:59:52.213387 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213414 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213440 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213465 17440 command_runner.go:130] > Dec 29 06:52:35 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213486 17440 command_runner.go:130] > Dec 29 06:52:44 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:44Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
I1229 06:59:52.213507 17440 command_runner.go:130] > Dec 29 06:52:46 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:46Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213528 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213549 17440 command_runner.go:130] > Dec 29 06:52:47 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:47Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213573 17440 command_runner.go:130] > Dec 29 06:52:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:52:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.213595 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.687270343Z" level=info msg="ignoring event" container=67027578cf0b79235004d7cd10841e25caaf8524e01d9d37b1cacadb486ee23f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.213616 17440 command_runner.go:130] > Dec 29 06:52:53 functional-695625 dockerd[1647]: time="2025-12-29T06:52:53.834054505Z" level=info msg="ignoring event" container=82ebbec1e21171232319e14e7521b1318f7a15d9862e1f988ba0a6f37b46d605 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.213637 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154228197Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=plugins.moby
I1229 06:59:52.213655 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154272599Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=plugins.moby
I1229 06:59:52.213675 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154382560Z" level=error msg="Failed to get event" error="rpc error: code = Unavailable desc = error reading from server: EOF" module=libcontainerd namespace=moby
I1229 06:59:52.213697 17440 command_runner.go:130] > Dec 29 06:53:24 functional-695625 dockerd[1647]: time="2025-12-29T06:53:24.154394909Z" level=info msg="Waiting for containerd to be ready to restart event processing" module=libcontainerd namespace=moby
I1229 06:59:52.213709 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 systemd[1]: Stopping Docker Application Container Engine...
I1229 06:59:52.213724 17440 command_runner.go:130] > Dec 29 06:53:25 functional-695625 dockerd[1647]: time="2025-12-29T06:53:25.157393741Z" level=info msg="Processing signal 'terminated'"
I1229 06:59:52.213735 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.159560262Z" level=error msg="Force shutdown daemon"
I1229 06:59:52.213749 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[1647]: time="2025-12-29T06:53:40.160035445Z" level=info msg="Daemon shutdown complete"
I1229 06:59:52.213759 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Deactivated successfully.
I1229 06:59:52.213774 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Stopped Docker Application Container Engine.
I1229 06:59:52.213786 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: docker.service: Consumed 2.138s CPU time, 29.7M memory peak.
I1229 06:59:52.213809 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 systemd[1]: Starting Docker Application Container Engine...
I1229 06:59:52.213822 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.286623538Z" level=info msg="Starting up"
I1229 06:59:52.213839 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295291170Z" level=info msg="OTEL tracing is not configured, using no-op tracer provider"
I1229 06:59:52.213856 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295480841Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/etc/cdi
I1229 06:59:52.213874 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.295496671Z" level=info msg="CDI directory does not exist, skipping: failed to monitor for changes: no such file or directory" dir=/var/run/cdi
I1229 06:59:52.213891 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.316635284Z" level=info msg="Creating a containerd client" address=/run/containerd/containerd.sock timeout=1m0s
I1229 06:59:52.213907 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.328807793Z" level=info msg="[graphdriver] trying configured driver: overlay2"
I1229 06:59:52.213920 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.355375449Z" level=info msg="Loading containers: start."
I1229 06:59:52.213942 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.681285713Z" level=info msg="ignoring event" container=5024b03252e39eed8a6ab1319b6386d9a846197175f5c2da843e4c5a390148b7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.213963 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.755492465Z" level=info msg="ignoring event" container=bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.213985 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.762530714Z" level=info msg="ignoring event" container=64853b50a6c5eae8b7f7796881dd851ed605b45dffe935eb82f288f18c60b24c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214006 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.775670003Z" level=info msg="ignoring event" container=0af491ef7c2f1a8312ee1c51bc20f44ec02abcc65665902a7fb5e969f770e6cd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214028 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.794654459Z" level=info msg="ignoring event" container=8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214055 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.801655844Z" level=info msg="ignoring event" container=548561c7ada8f895644c9b6b62d6e0a4034da8d3d80b4858670645e21d82b8d1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214078 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828715029Z" level=info msg="ignoring event" container=ad82b94f76293fed55cae621a103b8910667dd22aa9809da79dec1ae4d921263 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214099 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.828769092Z" level=info msg="ignoring event" container=a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214122 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.845767487Z" level=info msg="ignoring event" container=abbe46bd960e767cec61bab1a2010c730c247bbaffec2c7d29d32dbef73e8a32 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214144 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.864343925Z" level=info msg="ignoring event" container=fe7b5da2f7fb57e50d28df32820adefc7c25530e6e48a5b6d53880680dc58dc6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214166 17440 command_runner.go:130] > Dec 29 06:53:40 functional-695625 dockerd[4014]: time="2025-12-29T06:53:40.865774071Z" level=info msg="ignoring event" container=14aafc386533fecd8b99ec2f19f14752ed432bb1a70922f0cd34af8756fea697 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214190 17440 command_runner.go:130] > Dec 29 06:53:45 functional-695625 dockerd[4014]: time="2025-12-29T06:53:45.656598076Z" level=info msg="ignoring event" container=bd7d900efd487bc7b939fa3b0d25d19771212cf2b966bd0006a6316dc04f5159 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214211 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.636734672Z" level=info msg="ignoring event" container=fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.214242 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.811417108Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.11 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
I1229 06:59:52.214258 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.854503584Z" level=info msg="Removing stale sandbox" cid=a123d63a8edb isRestore=false sid=bee98e10184c
I1229 06:59:52.214283 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.857444846Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 155f23c2cd353f99747cbbed5071c374427d34acfe358ab2da9489f0ecc6dd58 20989221f5da3e18159e9875a44d6ffa354887adacc49a282cdee70b58f0dd06], retrying...."
I1229 06:59:52.214298 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.873316567Z" level=info msg="Removing stale sandbox" cid=0af491ef7c2f isRestore=false sid=043bbf7592a3
I1229 06:59:52.214323 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.875334227Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 abd499ef79857402bb9465b07e26fb0f75693045ea6a45283c4a1a4b13da7c92], retrying...."
I1229 06:59:52.214341 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.887452986Z" level=info msg="Removing stale sandbox" cid=ad82b94f7629 isRestore=false sid=4ae81a2c92d8
I1229 06:59:52.214365 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.890633879Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 ccabc3ec6c0d337909f3a6bfccd1999d5ddec500f785c46c7c1173bb9f142a4d], retrying...."
I1229 06:59:52.214380 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.896180450Z" level=info msg="Removing stale sandbox" cid=5024b03252e3 isRestore=false sid=4f7be10df8fc
I1229 06:59:52.214405 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.898438145Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 0e272d35a10e432b889f2a3f6f048225031acf42b0444ba6b0cc9339f3cb374f], retrying...."
I1229 06:59:52.214421 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.903187461Z" level=info msg="Removing stale sandbox" cid=64853b50a6c5 isRestore=false sid=826a3dc204ef
I1229 06:59:52.214447 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.905271147Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 67cd3d4378e987242bd34247eace592097542682b6c3f23a5a478422e9bfbb3b], retrying...."
I1229 06:59:52.214464 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.910152629Z" level=info msg="Removing stale sandbox" cid=548561c7ada8 isRestore=false sid=94281ce70a77
I1229 06:59:52.214489 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.911967707Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 b513626d489ab85e12802c06e57f2ac0b0298434467c73d2846152ca9481eeae], retrying...."
I1229 06:59:52.214506 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.917235829Z" level=info msg="Removing stale sandbox" cid=fe7b5da2f7fb isRestore=false sid=b5e6c523a381
I1229 06:59:52.214531 17440 command_runner.go:130] > Dec 29 06:53:47 functional-695625 dockerd[4014]: time="2025-12-29T06:53:47.919265802Z" level=warning msg="Error (Unable to complete atomic operation, key modified) deleting object [endpoint 5e51e23da1f1530469b268437db9bee9625cb9f876916ad030013651a498c4a9 bef3c0f56e910ab0a1a698f2eb08c97229abee2b90bf53ab9119cbdba3cb6eaa], retrying...."
I1229 06:59:52.214553 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022090385Z" level=warning msg="error locating sandbox id 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3: sandbox 043bbf7592a30562c3a5db5f6adef7320600a25484c541cc4623be026465ffa3 not found"
I1229 06:59:52.214576 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022147638Z" level=warning msg="error locating sandbox id 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48: sandbox 826a3dc204efcd2a53685e64193c7854d206da1f4b9d3191ff4310e7fa397f48 not found"
I1229 06:59:52.214600 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022162233Z" level=warning msg="error locating sandbox id 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6: sandbox 4ae81a2c92d8455752f7797b351baf4df03723964818db511d20f34eebee79e6 not found"
I1229 06:59:52.214623 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022177741Z" level=warning msg="error locating sandbox id 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e: sandbox 94281ce70a77af2abe1d9e184f9e465429cc20d573c966349f11864787414d7e not found"
I1229 06:59:52.214646 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022193375Z" level=warning msg="error locating sandbox id bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc: sandbox bee98e10184cba7e709f260e6b261c84d9c7e3c73d28f43d4a0e8856c6c40bcc not found"
I1229 06:59:52.214668 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022209936Z" level=warning msg="error locating sandbox id 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2: sandbox 4f7be10df8fc7c6fb8c1b7e4c4d539333974e2b08fb5c7ae02d96c2a907cd9f2 not found"
I1229 06:59:52.214690 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022222477Z" level=warning msg="error locating sandbox id b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0: sandbox b5e6c523a3812d48576001e9e106cedbf60f68221656df22876c21c1fa1554d0 not found"
I1229 06:59:52.214703 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.022440032Z" level=info msg="Loading containers: done."
I1229 06:59:52.214721 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037242165Z" level=info msg="Docker daemon" commit=89c5e8f containerd-snapshotter=false storage-driver=overlay2 version=28.5.2
I1229 06:59:52.214735 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.037335060Z" level=info msg="Initializing buildkit"
I1229 06:59:52.214748 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.057350643Z" level=info msg="Completed buildkit initialization"
I1229 06:59:52.214762 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.066932687Z" level=info msg="Daemon has completed initialization"
I1229 06:59:52.214775 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067089967Z" level=info msg="API listen on /var/run/docker.sock"
I1229 06:59:52.214788 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067190842Z" level=info msg="API listen on /run/docker.sock"
I1229 06:59:52.215123 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 dockerd[4014]: time="2025-12-29T06:53:48.067284257Z" level=info msg="API listen on [::]:2376"
I1229 06:59:52.215148 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Started Docker Application Container Engine.
I1229 06:59:52.215180 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 cri-dockerd[1510]: time="2025-12-29T06:53:48Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a123d63a8edb9ae4246a56e508d8c463cc8d08af29fc9cb9b6e0929aba5d6780\""
I1229 06:59:52.215194 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopping CRI Interface for Docker Application Container Engine...
I1229 06:59:52.215210 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Deactivated successfully.
I1229 06:59:52.215222 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: Stopped CRI Interface for Docker Application Container Engine.
I1229 06:59:52.215233 17440 command_runner.go:130] > Dec 29 06:53:48 functional-695625 systemd[1]: cri-docker.service: Consumed 1.284s CPU time, 18.5M memory peak.
I1229 06:59:52.215247 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Starting CRI Interface for Docker Application Container Engine...
I1229 06:59:52.215265 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting cri-dockerd 0.4.1 (55d6e1a1d6f2ee58949e13a0c66afe7d779ac942)"
I1229 06:59:52.215283 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
I1229 06:59:52.215299 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start docker client with request timeout 0s"
I1229 06:59:52.215312 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Hairpin mode is set to hairpin-veth"
I1229 06:59:52.215324 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Loaded network plugin cni"
I1229 06:59:52.215340 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri networking managed by network plugin cni"
I1229 06:59:52.215355 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Setting cgroupDriver systemd"
I1229 06:59:52.215372 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
I1229 06:59:52.215389 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
I1229 06:59:52.215401 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Start cri-dockerd grpc backend"
I1229 06:59:52.215409 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 systemd[1]: Started CRI Interface for Docker Application Container Engine.
I1229 06:59:52.215430 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215454 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215478 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215500 17440 command_runner.go:130] > Dec 29 06:53:49 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215517 17440 command_runner.go:130] > Dec 29 06:53:50 functional-695625 dockerd[4014]: time="2025-12-29T06:53:50.654005689Z" level=info msg="ignoring event" container=fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215532 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215549 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:51Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": unexpected command output nsenter: cannot open /proc/5603/ns/net: No such file or directory\n with error: exit status 1"
I1229 06:59:52.215565 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.781948864Z" level=info msg="ignoring event" container=17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215578 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.786486841Z" level=info msg="ignoring event" container=1fc5fa7d92959587c9b226fbae1d62a43a53ebff128984dc88d95d1d4b914ffd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215593 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.807329963Z" level=info msg="ignoring event" container=b046056ff071b35753057444705e51c1057b95d46559e1e9b8547d49e18da5a3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215606 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.833907949Z" level=info msg="ignoring event" container=6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215622 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.842344727Z" level=info msg="ignoring event" container=a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215643 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.846952655Z" level=info msg="ignoring event" container=4ed27973347711cbc183631c41c12607349bb00d5aed2e705f31e67f8f401bcd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215667 17440 command_runner.go:130] > Dec 29 06:53:51 functional-695625 dockerd[4014]: time="2025-12-29T06:53:51.855675748Z" level=info msg="ignoring event" container=98261fa185f6e8d6798b9786902bd8dacc1c3d2b3c629e497537e2dbfc1811e8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215688 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 dockerd[4014]: time="2025-12-29T06:53:52.089998903Z" level=info msg="ignoring event" container=a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215712 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a96e34d38f8c1eccbbdf73d99dbbbe353acea505d84b69f0fdd4e54cb811123/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215738 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d3819cc8ab802e5145e47325398f1da69b88a241482842040339b6b0d609a176/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215762 17440 command_runner.go:130] > Dec 29 06:53:52 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:52Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/fefef7c5591ea14974a99c19d99f86c4404e25de1b446a0cd0f0bcfffa63a991/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215839 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-wfq7m_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"a014f32abcd0141be679b6631a2dba3ddd9f5d2f50102e9808883af1630d0784\""
I1229 06:59:52.215868 17440 command_runner.go:130] > Dec 29 06:53:53 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:53Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a7b1e961ded554edec9d882d7f1f6093e8446ab1020c81b638de16b76de139b0/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215888 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.150956960Z" level=error msg="collecting stats for container /k8s_etcd_etcd-functional-695625_kube-system_8da5c6c8980da2ca920a502b6f312384_1: invalid id: id is empty"
I1229 06:59:52.215912 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: 2025/12/29 06:53:58 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request.(*RespWriterWrapper).writeHeader (resp_writer_wrapper.go:83)
I1229 06:59:52.215937 17440 command_runner.go:130] > Dec 29 06:53:58 functional-695625 dockerd[4014]: time="2025-12-29T06:53:58.741840545Z" level=info msg="ignoring event" container=d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.215959 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:53:59Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/173054afc2f39262ebb1466d26d5d6144bb8704054c087da601130a01d9caaf1/resolv.conf as [nameserver 192.168.122.1]"
I1229 06:59:52.215979 17440 command_runner.go:130] > Dec 29 06:53:59 functional-695625 cri-dockerd[4884]: W1229 06:53:59.025412 4884 logging.go:59] [core] [Server #1] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
I1229 06:59:52.216007 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:59:52.216027 17440 command_runner.go:130] > Dec 29 06:54:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:54:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:59:52.216051 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:59:52.216067 17440 command_runner.go:130] > Dec 29 06:55:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:55:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:59:52.216084 17440 command_runner.go:130] > Dec 29 06:56:00 functional-695625 dockerd[4014]: time="2025-12-29T06:56:00.626282205Z" level=info msg="ignoring event" container=78793b793ac7bf212626593654b66a72ee5b6a1a44629c55f4b79db622efccac module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216097 17440 command_runner.go:130] > Dec 29 06:56:11 functional-695625 dockerd[4014]: time="2025-12-29T06:56:11.553142622Z" level=info msg="ignoring event" container=18d0015c724a8c309c34f49df00b8349be921326fd871377506d78feeed1dbfb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216112 17440 command_runner.go:130] > Dec 29 06:56:32 functional-695625 dockerd[4014]: time="2025-12-29T06:56:32.448119389Z" level=info msg="ignoring event" container=0ca8df932c9614c55569a494d042cf1b3ccf68510e98b089818e1f61fe2b0cec module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216128 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="error getting RW layer size for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b': Error response from daemon: No such container: fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:59:52.216141 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b'"
I1229 06:59:52.216157 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:59:52.216171 17440 command_runner.go:130] > Dec 29 06:56:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:56:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:59:52.216195 17440 command_runner.go:130] > Dec 29 06:57:22 functional-695625 dockerd[4014]: time="2025-12-29T06:57:22.465508622Z" level=info msg="ignoring event" container=b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216222 17440 command_runner.go:130] > Dec 29 06:57:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:57:58Z" level=error msg="error getting RW layer size for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b': Error response from daemon: No such container: fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:59:52.216243 17440 command_runner.go:130] > Dec 29 06:57:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:57:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b'"
I1229 06:59:52.216263 17440 command_runner.go:130] > Dec 29 06:57:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:57:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:59:52.216276 17440 command_runner.go:130] > Dec 29 06:57:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:57:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:59:52.216289 17440 command_runner.go:130] > Dec 29 06:58:43 functional-695625 dockerd[4014]: time="2025-12-29T06:58:43.458641345Z" level=info msg="ignoring event" container=07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216304 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.011072219Z" level=info msg="ignoring event" container=173054afc2f39262ebb1466d26d5d6144bb8704054c087da601130a01d9caaf1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216318 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.102126666Z" level=info msg="ignoring event" container=6b7711ee25a2df71f8c7d296f7186875ebd6ab978a71d33f177de0cc3055645b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216331 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.266578298Z" level=info msg="ignoring event" container=a7b1e961ded554edec9d882d7f1f6093e8446ab1020c81b638de16b76de139b0 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216346 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.365376654Z" level=info msg="ignoring event" container=fefef7c5591ea14974a99c19d99f86c4404e25de1b446a0cd0f0bcfffa63a991 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216365 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.452640794Z" level=info msg="ignoring event" container=4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216380 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.557330204Z" level=info msg="ignoring event" container=d3819cc8ab802e5145e47325398f1da69b88a241482842040339b6b0d609a176 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216392 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.666151542Z" level=info msg="ignoring event" container=0a96e34d38f8c1eccbbdf73d99dbbbe353acea505d84b69f0fdd4e54cb811123 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216409 17440 command_runner.go:130] > Dec 29 06:58:50 functional-695625 dockerd[4014]: time="2025-12-29T06:58:50.751481082Z" level=info msg="ignoring event" container=f48fc04e347519b276e239ee9a6b0b8e093862313e46174a1815efae670eec9b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
I1229 06:59:52.216427 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID '4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535': Error response from daemon: No such container: 4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535"
I1229 06:59:52.216440 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535'"
I1229 06:59:52.216455 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b': Error response from daemon: No such container: fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
I1229 06:59:52.216467 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b'"
I1229 06:59:52.216484 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be': Error response from daemon: No such container: bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be"
I1229 06:59:52.216495 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be'"
I1229 06:59:52.216512 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e': Error response from daemon: No such container: a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e"
I1229 06:59:52.216525 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e'"
I1229 06:59:52.216542 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'd81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974': Error response from daemon: No such container: d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974"
I1229 06:59:52.216554 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'd81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974'"
I1229 06:59:52.216568 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID '6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00': Error response from daemon: No such container: 6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
I1229 06:59:52.216582 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00'"
I1229 06:59:52.216596 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
I1229 06:59:52.216611 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
I1229 06:59:52.216628 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID '8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d': Error response from daemon: No such container: 8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d"
I1229 06:59:52.216642 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d'"
I1229 06:59:52.216660 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID '17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13': Error response from daemon: No such container: 17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
I1229 06:59:52.216673 17440 command_runner.go:130] > Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13'"
I1229 06:59:52.238629 17440 logs.go:123] Gathering logs for container status ...
I1229 06:59:52.238668 17440 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1229 06:59:52.287732 17440 command_runner.go:130] > CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
W1229 06:59:52.290016 17440 out.go:434] Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
stderr:
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Some fatal errors occurred:
[ERROR Port-8441]: Port 8441 is in use
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
error: error execution phase preflight: preflight checks failed
To see the stack trace of this error execute with --v=5 or higher
W1229 06:59:52.290080 17440 out.go:285] *
W1229 06:59:52.290145 17440 out.go:285] X Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
stderr:
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Some fatal errors occurred:
[ERROR Port-8441]: Port 8441 is in use
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
error: error execution phase preflight: preflight checks failed
To see the stack trace of this error execute with --v=5 or higher
W1229 06:59:52.290156 17440 out.go:285] *
W1229 06:59:52.290452 17440 out.go:308] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I1229 06:59:52.293734 17440 out.go:203]
W1229 06:59:52.295449 17440 out.go:285] X Exiting due to GUEST_PORT_IN_USE: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
stderr:
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Some fatal errors occurred:
[ERROR Port-8441]: Port 8441 is in use
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
error: error execution phase preflight: preflight checks failed
To see the stack trace of this error execute with --v=5 or higher
W1229 06:59:52.295482 17440 out.go:285] * Suggestion: kubeadm detected a TCP port conflict with another process: probably another local Kubernetes installation. Run lsof -p<port> to find the process and kill it
W1229 06:59:52.295500 17440 out.go:285] * Related issue: https://github.com/kubernetes/minikube/issues/5484
I1229 06:59:52.296904 17440 out.go:203]
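[editor's note] The root cause minikube reports above is kubeadm's Port-8441 preflight check: something (presumably the kube-apiserver left over from the first start, which the kubelet log further down shows crash-looping) still holds the apiserver port, so the re-run of `kubeadm init` aborts. Note that the suggested `lsof -p<port>` takes a PID, not a port; the usual port lookup is `lsof -i :8441`. The Go program below is a minimal sketch, not part of minikube or this test suite, illustrating the condition the preflight check trips on: binding the port fails while another process owns it.

// portcheck.go: hypothetical sketch, not minikube code. Attempting to bind
// the apiserver port reproduces the condition behind
// "[ERROR Port-8441]: Port 8441 is in use".
package main

import (
	"fmt"
	"net"
	"os"
)

func main() {
	const addr = ":8441" // the port kubeadm's preflight check reported as busy

	// If Listen succeeds, nothing else is bound to the port.
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		// A bind failure (typically EADDRINUSE) is what the port check detects.
		fmt.Fprintf(os.Stderr, "port %s appears to be in use: %v\n", addr, err)
		os.Exit(1)
	}
	defer ln.Close()
	fmt.Printf("port %s is free\n", addr)
}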
==> Docker <==
Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00'"
Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID '8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d': Error response from daemon: No such container: 8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d"
Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d'"
Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="error getting RW layer size for container ID '17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13': Error response from daemon: No such container: 17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
Dec 29 06:58:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:58:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13'"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="error getting RW layer size for container ID '4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535': Error response from daemon: No such container: 4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '4d49952084c921663c4ca3a5954c1e5f3579ae4ede51cd2af5f26d39cffeb535'"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="error getting RW layer size for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b': Error response from daemon: No such container: fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fb6db97d8ffe47f841dad5663bec255840cbd95c984cdcea62e4a40ce9aadf6b'"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="error getting RW layer size for container ID 'bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be': Error response from daemon: No such container: bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'bd96b57aa9fceb297b978973bf1ec18d239034f519208bcbbdb6e3642bd688be'"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="error getting RW layer size for container ID 'd81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974': Error response from daemon: No such container: d81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'd81259f64136cb875391e3242b1e87ce8484d93804fd3fd8f058e794000af974'"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="error getting RW layer size for container ID 'a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e': Error response from daemon: No such container: a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'a79d99ad3fde3b39ff452b10ae85c19ada97b63b0d02bd1df136d6abdc0aab3e'"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="error getting RW layer size for container ID '17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13': Error response from daemon: No such container: 17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '17fe16a2822a8e49aab04292eeabfe463223d6c2df3f3c9cb22a3638b3ceab13'"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="error getting RW layer size for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd': Error response from daemon: No such container: fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'fd22eb0d6c14aa574181a25c39c62a49aca8e387257f4656bcd9f72653cd22fd'"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="error getting RW layer size for container ID '6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00': Error response from daemon: No such container: 6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '6f69ba6a1553a587ecf566f8e32713045c125b882d7d42b21f53e313e21aed00'"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="error getting RW layer size for container ID '8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d': Error response from daemon: No such container: 8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d"
Dec 29 06:59:58 functional-695625 cri-dockerd[4884]: time="2025-12-29T06:59:58Z" level=error msg="Set backoffDuration to : 1m0s for container ID '8911777281f410454c05e6fe7890cd18afd703aba8c259833fbd1b9504e6954d'"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
==> describe nodes <==
command /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" failed with error: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get nodes)
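[editor's note] The `kubectl describe nodes` call above fails with a server-side timeout because the apiserver never became healthy. The sketch below is an illustration only, not minikube's implementation: it shows one way a log collector could bound such a diagnostic command with a client-side timeout so an unresponsive apiserver cannot stall the rest of the log gathering. The 30-second budget is an assumption; the kubectl path and kubeconfig mirror the command in the log.

// describe_nodes.go: hypothetical sketch of bounding a diagnostic kubectl
// call with a context timeout; not minikube's actual command runner.
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	// Assumed budget for the diagnostic call.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "sudo",
		"/var/lib/minikube/binaries/v1.35.0/kubectl", "describe", "nodes",
		"--kubeconfig=/var/lib/minikube/kubeconfig")

	out, err := cmd.CombinedOutput()
	if err != nil {
		// With a down or unresponsive apiserver this is the expected path.
		fmt.Printf("describe nodes failed: %v\n%s", err, out)
		return
	}
	fmt.Printf("%s", out)
}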
==> dmesg <==
[ +0.000088] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +0.005634] (rpcbind)[119]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
[ +0.202886] crun[405]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set
[ +0.971059] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
[ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
[ +0.000001] NFSD: Unable to initialize client recovery tracking! (-2)
[ +0.268875] kauditd_printk_skb: 32 callbacks suppressed
[ +0.123569] kauditd_printk_skb: 342 callbacks suppressed
[ +0.099711] kauditd_printk_skb: 205 callbacks suppressed
[ +0.170782] kauditd_printk_skb: 221 callbacks suppressed
[ +0.199839] kauditd_printk_skb: 18 callbacks suppressed
[ +0.025660] kauditd_printk_skb: 318 callbacks suppressed
[Dec29 06:53] kauditd_printk_skb: 19 callbacks suppressed
[ +15.204939] kauditd_printk_skb: 18 callbacks suppressed
[ +0.333829] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
[ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
[ +4.976278] kauditd_printk_skb: 42 callbacks suppressed
[ +0.830497] kauditd_printk_skb: 396 callbacks suppressed
[ +5.294312] kauditd_printk_skb: 231 callbacks suppressed
[Dec29 06:56] kauditd_printk_skb: 36 callbacks suppressed
[ +10.952068] kauditd_printk_skb: 66 callbacks suppressed
[ +20.880271] kauditd_printk_skb: 14 callbacks suppressed
[Dec29 06:57] kauditd_printk_skb: 14 callbacks suppressed
[Dec29 06:58] kauditd_printk_skb: 14 callbacks suppressed
[ +6.672596] kauditd_printk_skb: 14 callbacks suppressed
==> kernel <==
07:01:09 up 8 min, 0 users, load average: 0.03, 0.23, 0.16
Linux functional-695625 6.6.95 #1 SMP PREEMPT_DYNAMIC Mon Dec 29 06:17:23 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2025.02"
==> kubelet <==
Dec 29 06:58:39 functional-695625 kubelet[6517]: E1229 06:58:39.185100 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
Dec 29 06:58:40 functional-695625 kubelet[6517]: E1229 06:58:40.184709 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
Dec 29 06:58:40 functional-695625 kubelet[6517]: E1229 06:58:40.184771 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-695625" containerName="etcd"
Dec 29 06:58:40 functional-695625 kubelet[6517]: E1229 06:58:40.308010 6517 eviction_manager.go:297] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"functional-695625\" not found"
Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.185947 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.186016 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
Dec 29 06:58:43 functional-695625 kubelet[6517]: I1229 06:58:43.186033 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.503148 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.503225 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
Dec 29 06:58:43 functional-695625 kubelet[6517]: I1229 06:58:43.503241 6517 scope.go:122] "RemoveContainer" containerID="07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817"
Dec 29 06:58:43 functional-695625 kubelet[6517]: E1229 06:58:43.503356 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
Dec 29 06:58:44 functional-695625 kubelet[6517]: I1229 06:58:44.519860 6517 scope.go:122] "RemoveContainer" containerID="b206d555ad194fa8eb29c391078f91a79baa156c83dfbefe92f8772dfc2c4cbc"
Dec 29 06:58:44 functional-695625 kubelet[6517]: E1229 06:58:44.520985 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
Dec 29 06:58:44 functional-695625 kubelet[6517]: E1229 06:58:44.521063 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
Dec 29 06:58:44 functional-695625 kubelet[6517]: I1229 06:58:44.521079 6517 scope.go:122] "RemoveContainer" containerID="07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817"
Dec 29 06:58:44 functional-695625 kubelet[6517]: E1229 06:58:44.521196 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
Dec 29 06:58:45 functional-695625 kubelet[6517]: E1229 06:58:45.537487 6517 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"functional-695625\" not found" node="functional-695625"
Dec 29 06:58:45 functional-695625 kubelet[6517]: E1229 06:58:45.537563 6517 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-695625" containerName="kube-apiserver"
Dec 29 06:58:45 functional-695625 kubelet[6517]: I1229 06:58:45.537579 6517 scope.go:122] "RemoveContainer" containerID="07a17306156372940966dc7c7e00122a99f1c0f6e78ddc5e4c0cb67f3cff1817"
Dec 29 06:58:45 functional-695625 kubelet[6517]: E1229 06:58:45.537686 6517 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-apiserver pod=kube-apiserver-functional-695625_kube-system(d173c000af26dcef62569d3a5345fcae)\"" pod="kube-system/kube-apiserver-functional-695625" podUID="d173c000af26dcef62569d3a5345fcae"
Dec 29 06:58:46 functional-695625 kubelet[6517]: E1229 06:58:46.747043 6517 controller.go:201] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.39.121:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-695625?timeout=10s\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" interval="7s"
Dec 29 06:58:49 functional-695625 systemd[1]: Stopping kubelet: The Kubernetes Node Agent...
Dec 29 06:58:49 functional-695625 systemd[1]: kubelet.service: Deactivated successfully.
Dec 29 06:58:49 functional-695625 systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
Dec 29 06:58:49 functional-695625 systemd[1]: kubelet.service: Consumed 4.400s CPU time, 30.3M memory peak.
-- /stdout --
helpers_test.go:263: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p functional-695625 -n functional-695625
helpers_test.go:263: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p functional-695625 -n functional-695625: exit status 2 (15.784701968s)
-- stdout --
Stopped
-- /stdout --
helpers_test.go:263: status error: exit status 2 (may be ok)
helpers_test.go:265: "functional-695625" apiserver is not running, skipping kubectl commands (state="Stopped")
--- FAIL: TestFunctional/serial/SoftStart (484.11s)
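[editor's note] The final probe prints only "Stopped" because `--format={{.APIServer}}` is a Go template applied to minikube's status result, so just the APIServer field is rendered; the exit status 2 is the degraded-state code the helper treats as "may be ok" before skipping kubectl commands. The sketch below shows how such a template renders a status value; the Status struct here is hypothetical, not minikube's actual type.

// status_format.go: hypothetical sketch of rendering a status struct with
// the same template string the test passed via --format.
package main

import (
	"os"
	"text/template"
)

type Status struct {
	Host      string
	Kubelet   string
	APIServer string
}

func main() {
	st := Status{Host: "Running", Kubelet: "Stopped", APIServer: "Stopped"}

	// Same template string as `minikube status --format={{.APIServer}}`.
	tmpl := template.Must(template.New("status").Parse("{{.APIServer}}"))
	if err := tmpl.Execute(os.Stdout, st); err != nil { // prints: Stopped
		panic(err)
	}
}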