=== RUN TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/ExtraConfig
functional_test.go:772: (dbg) Run: out/minikube-linux-amd64 start -p functional-240388 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all
E1217 19:33:53.568386 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/addons-743931/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:34:21.273576 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/addons-743931/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:10.408975 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:10.415455 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:10.426957 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:10.448466 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:10.489957 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:10.571444 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:10.733050 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:11.054858 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:11.697026 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:12.978662 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:15.541770 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:20.663185 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:30.905466 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:35:51.387029 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1217 19:36:32.350382 259985 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-750489/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
functional_test.go:772: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p functional-240388 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all: exit status 80 (4m45.319104699s)
-- stdout --
* [functional-240388] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
- MINIKUBE_LOCATION=22186
- MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
- KUBECONFIG=/home/jenkins/minikube-integration/22186-255930/kubeconfig
- MINIKUBE_HOME=/home/jenkins/minikube-integration/22186-255930/.minikube
- MINIKUBE_BIN=out/minikube-linux-amd64
- MINIKUBE_FORCE_SYSTEMD=
* Using the kvm2 driver based on existing profile
* Starting "functional-240388" primary control-plane node in "functional-240388" cluster
- apiserver.enable-admission-plugins=NamespaceAutoProvision
* Configuring bridge CNI (Container Networking Interface) ...
* Verifying Kubernetes components...
- Using image gcr.io/k8s-minikube/storage-provisioner:v5
* Enabled addons: storage-provisioner, default-storageclass
-- /stdout --
** stderr **
X Exiting due to GUEST_START: extra waiting: WaitExtra: context deadline exceeded
** /stderr **
functional_test.go:774: failed to restart minikube. args "out/minikube-linux-amd64 start -p functional-240388 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all": exit status 80
functional_test.go:776: restart took 4m45.319365241s for "functional-240388" cluster.
I1217 19:37:51.467495 259985 config.go:182] Loaded profile config "functional-240388": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0-rc.1
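Note: exit status 80 here comes from the --wait=all readiness check timing out (GUEST_START: extra waiting: WaitExtra: context deadline exceeded), not from the start command itself refusing to run. A minimal sketch of manual follow-up checks against this profile (the automated post-mortem below collects similar data; the kubectl context name is assumed to match the profile name, as minikube normally configures it):
    out/minikube-linux-amd64 status -p functional-240388
    out/minikube-linux-amd64 ssh -p functional-240388 -- sudo journalctl -u kubelet --no-pager -n 50
    kubectl --context functional-240388 get nodes
    kubectl --context functional-240388 get pods -A -o wide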
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:224: ======> post-mortem[TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/ExtraConfig]: network settings <======
helpers_test.go:231: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:248: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p functional-240388 -n functional-240388
helpers_test.go:253: <<< TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/ExtraConfig FAILED: start of post-mortem logs <<<
helpers_test.go:254: ======> post-mortem[TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/ExtraConfig]: minikube logs <======
helpers_test.go:256: (dbg) Run: out/minikube-linux-amd64 -p functional-240388 logs -n 25
helpers_test.go:261: TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/ExtraConfig logs:
-- stdout --
==> Audit <==
┌─────────┬──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ image │ functional-750489 image ls --format table --alsologtostderr │ functional-750489 │ jenkins │ v1.37.0 │ 17 Dec 25 19:30 UTC │ 17 Dec 25 19:30 UTC │
│ service │ functional-750489 service list │ functional-750489 │ jenkins │ v1.37.0 │ 17 Dec 25 19:30 UTC │ 17 Dec 25 19:30 UTC │
│ service │ functional-750489 service list -o json │ functional-750489 │ jenkins │ v1.37.0 │ 17 Dec 25 19:30 UTC │ 17 Dec 25 19:31 UTC │
│ service │ functional-750489 service --namespace=default --https --url hello-node │ functional-750489 │ jenkins │ v1.37.0 │ 17 Dec 25 19:31 UTC │ 17 Dec 25 19:31 UTC │
│ service │ functional-750489 service hello-node --url --format={{.IP}} │ functional-750489 │ jenkins │ v1.37.0 │ 17 Dec 25 19:31 UTC │ 17 Dec 25 19:31 UTC │
│ service │ functional-750489 service hello-node --url │ functional-750489 │ jenkins │ v1.37.0 │ 17 Dec 25 19:31 UTC │ 17 Dec 25 19:31 UTC │
│ delete │ -p functional-750489 │ functional-750489 │ jenkins │ v1.37.0 │ 17 Dec 25 19:31 UTC │ 17 Dec 25 19:31 UTC │
│ start │ -p functional-240388 --memory=4096 --apiserver-port=8441 --wait=all --driver=kvm2 --kubernetes-version=v1.35.0-rc.1 │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:31 UTC │ 17 Dec 25 19:31 UTC │
│ start │ -p functional-240388 --alsologtostderr -v=8 │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:31 UTC │ 17 Dec 25 19:32 UTC │
│ cache │ functional-240388 cache add registry.k8s.io/pause:3.1 │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:32 UTC │ 17 Dec 25 19:32 UTC │
│ cache │ functional-240388 cache add registry.k8s.io/pause:3.3 │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:32 UTC │ 17 Dec 25 19:33 UTC │
│ cache │ functional-240388 cache add registry.k8s.io/pause:latest │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ cache │ functional-240388 cache add minikube-local-cache-test:functional-240388 │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ cache │ functional-240388 cache delete minikube-local-cache-test:functional-240388 │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ cache │ delete registry.k8s.io/pause:3.3 │ minikube │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ cache │ list │ minikube │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ ssh │ functional-240388 ssh sudo crictl images │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ ssh │ functional-240388 ssh sudo docker rmi registry.k8s.io/pause:latest │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ ssh │ functional-240388 ssh sudo crictl inspecti registry.k8s.io/pause:latest │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ │
│ cache │ functional-240388 cache reload │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ ssh │ functional-240388 ssh sudo crictl inspecti registry.k8s.io/pause:latest │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ cache │ delete registry.k8s.io/pause:3.1 │ minikube │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ cache │ delete registry.k8s.io/pause:latest │ minikube │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ kubectl │ functional-240388 kubectl -- --context functional-240388 get pods │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ 17 Dec 25 19:33 UTC │
│ start │ -p functional-240388 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all │ functional-240388 │ jenkins │ v1.37.0 │ 17 Dec 25 19:33 UTC │ │
└─────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/12/17 19:33:06
Running on machine: ubuntu-20-agent-9
Binary: Built with gc go1.25.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1217 19:33:06.203503 267684 out.go:360] Setting OutFile to fd 1 ...
I1217 19:33:06.203782 267684 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1217 19:33:06.203786 267684 out.go:374] Setting ErrFile to fd 2...
I1217 19:33:06.203789 267684 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1217 19:33:06.204003 267684 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22186-255930/.minikube/bin
I1217 19:33:06.204492 267684 out.go:368] Setting JSON to false
I1217 19:33:06.205478 267684 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-9","uptime":4530,"bootTime":1765995456,"procs":175,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1045-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1217 19:33:06.205528 267684 start.go:143] virtualization: kvm guest
I1217 19:33:06.207441 267684 out.go:179] * [functional-240388] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1217 19:33:06.208646 267684 out.go:179] - MINIKUBE_LOCATION=22186
I1217 19:33:06.208699 267684 notify.go:221] Checking for updates...
I1217 19:33:06.211236 267684 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1217 19:33:06.212698 267684 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22186-255930/kubeconfig
I1217 19:33:06.213817 267684 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22186-255930/.minikube
I1217 19:33:06.215252 267684 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1217 19:33:06.216551 267684 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1217 19:33:06.218219 267684 config.go:182] Loaded profile config "functional-240388": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0-rc.1
I1217 19:33:06.218312 267684 driver.go:422] Setting default libvirt URI to qemu:///system
I1217 19:33:06.251540 267684 out.go:179] * Using the kvm2 driver based on existing profile
I1217 19:33:06.252759 267684 start.go:309] selected driver: kvm2
I1217 19:33:06.252769 267684 start.go:927] validating driver "kvm2" against &{Name:functional-240388 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22186/minikube-v1.37.0-1765965980-22186-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765966054-22186@sha256:1c173489767e6632c410d2554f1a2272f032a423dd528157e201daadfe3c43f0 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{Kuber
netesVersion:v1.35.0-rc.1 ClusterName:functional-240388 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.22 Port:8441 KubernetesVersion:v1.35.0-rc.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountM
Size:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1217 19:33:06.252872 267684 start.go:938] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1217 19:33:06.253839 267684 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1217 19:33:06.253869 267684 cni.go:84] Creating CNI manager for ""
I1217 19:33:06.253927 267684 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1217 19:33:06.253968 267684 start.go:353] cluster config:
{Name:functional-240388 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22186/minikube-v1.37.0-1765965980-22186-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765966054-22186@sha256:1c173489767e6632c410d2554f1a2272f032a423dd528157e201daadfe3c43f0 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-rc.1 ClusterName:functional-240388 Namespace:default APIS
erverHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.22 Port:8441 KubernetesVersion:v1.35.0-rc.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: Mount
MSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1217 19:33:06.254054 267684 iso.go:125] acquiring lock: {Name:mkeac5b890dbb93d0e36dd357fe6f0cc980f247e Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1217 19:33:06.256031 267684 out.go:179] * Starting "functional-240388" primary control-plane node in "functional-240388" cluster
I1217 19:33:06.257078 267684 preload.go:188] Checking if preload exists for k8s version v1.35.0-rc.1 and runtime docker
I1217 19:33:06.257104 267684 preload.go:203] Found local preload: /home/jenkins/minikube-integration/22186-255930/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-rc.1-docker-overlay2-amd64.tar.lz4
I1217 19:33:06.257110 267684 cache.go:65] Caching tarball of preloaded images
I1217 19:33:06.257199 267684 preload.go:238] Found /home/jenkins/minikube-integration/22186-255930/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-rc.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1217 19:33:06.257207 267684 cache.go:68] Finished verifying existence of preloaded tar for v1.35.0-rc.1 on docker
I1217 19:33:06.257315 267684 profile.go:143] Saving config to /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-240388/config.json ...
I1217 19:33:06.257529 267684 start.go:360] acquireMachinesLock for functional-240388: {Name:mkc3bc9f6c99eb74eb5c5fedf7f00499ebad23f0 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1217 19:33:06.257571 267684 start.go:364] duration metric: took 27.823µs to acquireMachinesLock for "functional-240388"
I1217 19:33:06.257581 267684 start.go:96] Skipping create...Using existing machine configuration
I1217 19:33:06.257585 267684 fix.go:54] fixHost starting:
I1217 19:33:06.259464 267684 fix.go:112] recreateIfNeeded on functional-240388: state=Running err=<nil>
W1217 19:33:06.259480 267684 fix.go:138] unexpected machine state, will restart: <nil>
I1217 19:33:06.261165 267684 out.go:252] * Updating the running kvm2 "functional-240388" VM ...
I1217 19:33:06.261187 267684 machine.go:94] provisionDockerMachine start ...
I1217 19:33:06.263928 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.264385 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:06.264410 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.264635 267684 main.go:143] libmachine: Using SSH client type: native
I1217 19:33:06.264883 267684 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e1a0] 0x850e40 <nil> [] 0s} 192.168.39.22 22 <nil> <nil>}
I1217 19:33:06.264889 267684 main.go:143] libmachine: About to run SSH command:
hostname
I1217 19:33:06.378717 267684 main.go:143] libmachine: SSH cmd err, output: <nil>: functional-240388
I1217 19:33:06.378738 267684 buildroot.go:166] provisioning hostname "functional-240388"
I1217 19:33:06.382239 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.382773 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:06.382796 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.383019 267684 main.go:143] libmachine: Using SSH client type: native
I1217 19:33:06.383275 267684 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e1a0] 0x850e40 <nil> [] 0s} 192.168.39.22 22 <nil> <nil>}
I1217 19:33:06.383283 267684 main.go:143] libmachine: About to run SSH command:
sudo hostname functional-240388 && echo "functional-240388" | sudo tee /etc/hostname
I1217 19:33:06.513472 267684 main.go:143] libmachine: SSH cmd err, output: <nil>: functional-240388
I1217 19:33:06.516442 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.516888 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:06.516905 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.517149 267684 main.go:143] libmachine: Using SSH client type: native
I1217 19:33:06.517343 267684 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e1a0] 0x850e40 <nil> [] 0s} 192.168.39.22 22 <nil> <nil>}
I1217 19:33:06.517355 267684 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sfunctional-240388' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-240388/g' /etc/hosts;
else
echo '127.0.1.1 functional-240388' | sudo tee -a /etc/hosts;
fi
fi
I1217 19:33:06.629940 267684 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1217 19:33:06.629963 267684 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/22186-255930/.minikube CaCertPath:/home/jenkins/minikube-integration/22186-255930/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22186-255930/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22186-255930/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22186-255930/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22186-255930/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22186-255930/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22186-255930/.minikube}
I1217 19:33:06.630013 267684 buildroot.go:174] setting up certificates
I1217 19:33:06.630022 267684 provision.go:84] configureAuth start
I1217 19:33:06.632827 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.633218 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:06.633234 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.635673 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.636028 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:06.636051 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.636175 267684 provision.go:143] copyHostCerts
I1217 19:33:06.636228 267684 exec_runner.go:144] found /home/jenkins/minikube-integration/22186-255930/.minikube/ca.pem, removing ...
I1217 19:33:06.636238 267684 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22186-255930/.minikube/ca.pem
I1217 19:33:06.636309 267684 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22186-255930/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22186-255930/.minikube/ca.pem (1082 bytes)
I1217 19:33:06.636400 267684 exec_runner.go:144] found /home/jenkins/minikube-integration/22186-255930/.minikube/cert.pem, removing ...
I1217 19:33:06.636404 267684 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22186-255930/.minikube/cert.pem
I1217 19:33:06.636429 267684 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22186-255930/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22186-255930/.minikube/cert.pem (1123 bytes)
I1217 19:33:06.636482 267684 exec_runner.go:144] found /home/jenkins/minikube-integration/22186-255930/.minikube/key.pem, removing ...
I1217 19:33:06.636485 267684 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22186-255930/.minikube/key.pem
I1217 19:33:06.636506 267684 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22186-255930/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22186-255930/.minikube/key.pem (1675 bytes)
I1217 19:33:06.636551 267684 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22186-255930/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22186-255930/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22186-255930/.minikube/certs/ca-key.pem org=jenkins.functional-240388 san=[127.0.0.1 192.168.39.22 functional-240388 localhost minikube]
I1217 19:33:06.786573 267684 provision.go:177] copyRemoteCerts
I1217 19:33:06.786659 267684 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1217 19:33:06.789975 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.790330 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:06.790345 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.790477 267684 sshutil.go:53] new ssh client: &{IP:192.168.39.22 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22186-255930/.minikube/machines/functional-240388/id_rsa Username:docker}
I1217 19:33:06.879675 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1217 19:33:06.911239 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1217 19:33:06.942304 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1217 19:33:06.972692 267684 provision.go:87] duration metric: took 342.657497ms to configureAuth
I1217 19:33:06.972712 267684 buildroot.go:189] setting minikube options for container-runtime
I1217 19:33:06.972901 267684 config.go:182] Loaded profile config "functional-240388": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0-rc.1
I1217 19:33:06.975759 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.976128 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:06.976144 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:06.976309 267684 main.go:143] libmachine: Using SSH client type: native
I1217 19:33:06.976500 267684 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e1a0] 0x850e40 <nil> [] 0s} 192.168.39.22 22 <nil> <nil>}
I1217 19:33:06.976505 267684 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1217 19:33:07.088739 267684 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1217 19:33:07.088756 267684 buildroot.go:70] root file system type: tmpfs
I1217 19:33:07.088852 267684 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1217 19:33:07.092317 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.092778 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:07.092795 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.092955 267684 main.go:143] libmachine: Using SSH client type: native
I1217 19:33:07.093202 267684 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e1a0] 0x850e40 <nil> [] 0s} 192.168.39.22 22 <nil> <nil>}
I1217 19:33:07.093245 267684 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1217 19:33:07.228187 267684 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1217 19:33:07.231148 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.231515 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:07.231529 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.231733 267684 main.go:143] libmachine: Using SSH client type: native
I1217 19:33:07.231924 267684 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e1a0] 0x850e40 <nil> [] 0s} 192.168.39.22 22 <nil> <nil>}
I1217 19:33:07.231933 267684 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1217 19:33:07.348208 267684 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1217 19:33:07.348223 267684 machine.go:97] duration metric: took 1.087029537s to provisionDockerMachine
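Note: the provisioning step above pushes a regenerated /lib/systemd/system/docker.service to the VM and restarts Docker only if the unit actually changed (the diff/mv/systemctl command issued at 19:33:07.231). A hedged way to confirm the active unit matches what was pushed, assuming SSH access to the profile VM:
    out/minikube-linux-amd64 ssh -p functional-240388 -- sudo systemctl cat docker
    out/minikube-linux-amd64 ssh -p functional-240388 -- systemctl show docker --property=ExecStart --no-pager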
I1217 19:33:07.348235 267684 start.go:293] postStartSetup for "functional-240388" (driver="kvm2")
I1217 19:33:07.348246 267684 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1217 19:33:07.348303 267684 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1217 19:33:07.351188 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.351680 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:07.351698 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.351844 267684 sshutil.go:53] new ssh client: &{IP:192.168.39.22 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22186-255930/.minikube/machines/functional-240388/id_rsa Username:docker}
I1217 19:33:07.437905 267684 ssh_runner.go:195] Run: cat /etc/os-release
I1217 19:33:07.443173 267684 info.go:137] Remote host: Buildroot 2025.02
I1217 19:33:07.443192 267684 filesync.go:126] Scanning /home/jenkins/minikube-integration/22186-255930/.minikube/addons for local assets ...
I1217 19:33:07.443261 267684 filesync.go:126] Scanning /home/jenkins/minikube-integration/22186-255930/.minikube/files for local assets ...
I1217 19:33:07.443368 267684 filesync.go:149] local asset: /home/jenkins/minikube-integration/22186-255930/.minikube/files/etc/ssl/certs/2599852.pem -> 2599852.pem in /etc/ssl/certs
I1217 19:33:07.443455 267684 filesync.go:149] local asset: /home/jenkins/minikube-integration/22186-255930/.minikube/files/etc/test/nested/copy/259985/hosts -> hosts in /etc/test/nested/copy/259985
I1217 19:33:07.443494 267684 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/259985
I1217 19:33:07.456195 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/files/etc/ssl/certs/2599852.pem --> /etc/ssl/certs/2599852.pem (1708 bytes)
I1217 19:33:07.487969 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/files/etc/test/nested/copy/259985/hosts --> /etc/test/nested/copy/259985/hosts (40 bytes)
I1217 19:33:07.519505 267684 start.go:296] duration metric: took 171.253835ms for postStartSetup
I1217 19:33:07.519546 267684 fix.go:56] duration metric: took 1.261959532s for fixHost
I1217 19:33:07.522654 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.523039 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:07.523063 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.523261 267684 main.go:143] libmachine: Using SSH client type: native
I1217 19:33:07.523466 267684 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84e1a0] 0x850e40 <nil> [] 0s} 192.168.39.22 22 <nil> <nil>}
I1217 19:33:07.523470 267684 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1217 19:33:07.636250 267684 main.go:143] libmachine: SSH cmd err, output: <nil>: 1765999987.632110773
I1217 19:33:07.636266 267684 fix.go:216] guest clock: 1765999987.632110773
I1217 19:33:07.636274 267684 fix.go:229] Guest: 2025-12-17 19:33:07.632110773 +0000 UTC Remote: 2025-12-17 19:33:07.519549795 +0000 UTC m=+1.366822896 (delta=112.560978ms)
I1217 19:33:07.636297 267684 fix.go:200] guest clock delta is within tolerance: 112.560978ms
I1217 19:33:07.636302 267684 start.go:83] releasing machines lock for "functional-240388", held for 1.378724961s
I1217 19:33:07.639671 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.640215 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:07.640235 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.640830 267684 ssh_runner.go:195] Run: cat /version.json
I1217 19:33:07.640915 267684 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1217 19:33:07.643978 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.644315 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:07.644329 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.644334 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.644500 267684 sshutil.go:53] new ssh client: &{IP:192.168.39.22 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22186-255930/.minikube/machines/functional-240388/id_rsa Username:docker}
I1217 19:33:07.644911 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:07.644934 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:07.645119 267684 sshutil.go:53] new ssh client: &{IP:192.168.39.22 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22186-255930/.minikube/machines/functional-240388/id_rsa Username:docker}
I1217 19:33:07.727454 267684 ssh_runner.go:195] Run: systemctl --version
I1217 19:33:07.761328 267684 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1217 19:33:07.768413 267684 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1217 19:33:07.768480 267684 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1217 19:33:07.781436 267684 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
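Note: the steps above check for a loopback CNI config and then look for stale bridge/podman definitions to rename out of the way before the bridge CNI is reconfigured; here nothing was found. A quick hedged check of what remains in the CNI directory on the VM (path taken from the find command above):
    out/minikube-linux-amd64 ssh -p functional-240388 -- ls -la /etc/cni/net.d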
I1217 19:33:07.781462 267684 start.go:496] detecting cgroup driver to use...
I1217 19:33:07.781587 267684 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1217 19:33:07.808240 267684 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1217 19:33:07.822696 267684 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1217 19:33:07.836690 267684 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1217 19:33:07.836752 267684 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1217 19:33:07.850854 267684 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1217 19:33:07.865319 267684 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1217 19:33:07.881786 267684 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1217 19:33:07.896674 267684 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1217 19:33:07.913883 267684 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1217 19:33:07.928739 267684 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1217 19:33:07.943427 267684 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1217 19:33:07.958124 267684 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1217 19:33:07.969975 267684 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1217 19:33:07.983204 267684 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1217 19:33:08.188650 267684 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1217 19:33:08.241722 267684 start.go:496] detecting cgroup driver to use...
I1217 19:33:08.241799 267684 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1217 19:33:08.261139 267684 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1217 19:33:08.279259 267684 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1217 19:33:08.309361 267684 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1217 19:33:08.326823 267684 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1217 19:33:08.343826 267684 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1217 19:33:08.368675 267684 ssh_runner.go:195] Run: which cri-dockerd
I1217 19:33:08.373233 267684 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1217 19:33:08.386017 267684 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1217 19:33:08.407873 267684 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1217 19:33:08.615617 267684 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1217 19:33:08.826661 267684 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1217 19:33:08.826828 267684 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1217 19:33:08.852952 267684 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1217 19:33:08.869029 267684 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1217 19:33:09.065134 267684 ssh_runner.go:195] Run: sudo systemctl restart docker
I1217 19:33:40.054438 267684 ssh_runner.go:235] Completed: sudo systemctl restart docker: (30.989264208s)
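Note: the Docker restart alone took roughly 31 seconds here, which eats into the overall start deadline that later expires during the extra-component wait. A hedged way to see what the docker unit was doing during that window (standard journalctl usage; the unit name is the one managed above):
    out/minikube-linux-amd64 ssh -p functional-240388 -- sudo journalctl -u docker --no-pager --since "10 min ago"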
I1217 19:33:40.054535 267684 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1217 19:33:40.092868 267684 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1217 19:33:40.125996 267684 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I1217 19:33:40.170501 267684 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1217 19:33:40.189408 267684 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1217 19:33:40.345265 267684 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1217 19:33:40.504425 267684 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1217 19:33:40.661370 267684 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1217 19:33:40.704620 267684 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1217 19:33:40.720078 267684 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1217 19:33:40.910684 267684 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1217 19:33:41.031296 267684 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1217 19:33:41.051233 267684 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1217 19:33:41.051302 267684 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1217 19:33:41.057808 267684 start.go:564] Will wait 60s for crictl version
I1217 19:33:41.057880 267684 ssh_runner.go:195] Run: which crictl
I1217 19:33:41.062048 267684 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1217 19:33:41.095492 267684 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.2
RuntimeApiVersion: v1
I1217 19:33:41.095556 267684 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1217 19:33:41.122830 267684 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1217 19:33:41.151134 267684 out.go:252] * Preparing Kubernetes v1.35.0-rc.1 on Docker 28.5.2 ...
I1217 19:33:41.154049 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:41.154487 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:41.154506 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:41.154685 267684 ssh_runner.go:195] Run: grep 192.168.39.1 host.minikube.internal$ /etc/hosts
I1217 19:33:41.161212 267684 out.go:179] - apiserver.enable-admission-plugins=NamespaceAutoProvision
I1217 19:33:41.162920 267684 kubeadm.go:884] updating cluster {Name:functional-240388 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22186/minikube-v1.37.0-1765965980-22186-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765966054-22186@sha256:1c173489767e6632c410d2554f1a2272f032a423dd528157e201daadfe3c43f0 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1
.35.0-rc.1 ClusterName:functional-240388 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.22 Port:8441 KubernetesVersion:v1.35.0-rc.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Moun
tString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1217 19:33:41.163089 267684 preload.go:188] Checking if preload exists for k8s version v1.35.0-rc.1 and runtime docker
I1217 19:33:41.163139 267684 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1217 19:33:41.191761 267684 docker.go:691] Got preloaded images: -- stdout --
minikube-local-cache-test:functional-240388
registry.k8s.io/kube-controller-manager:v1.35.0-rc.1
registry.k8s.io/kube-scheduler:v1.35.0-rc.1
registry.k8s.io/kube-apiserver:v1.35.0-rc.1
registry.k8s.io/kube-proxy:v1.35.0-rc.1
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
registry.k8s.io/pause:3.3
registry.k8s.io/pause:3.1
registry.k8s.io/pause:latest
-- /stdout --
I1217 19:33:41.191775 267684 docker.go:621] Images already preloaded, skipping extraction
I1217 19:33:41.191834 267684 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1217 19:33:41.239915 267684 docker.go:691] Got preloaded images: -- stdout --
minikube-local-cache-test:functional-240388
registry.k8s.io/kube-controller-manager:v1.35.0-rc.1
registry.k8s.io/kube-scheduler:v1.35.0-rc.1
registry.k8s.io/kube-apiserver:v1.35.0-rc.1
registry.k8s.io/kube-proxy:v1.35.0-rc.1
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
registry.k8s.io/pause:3.3
registry.k8s.io/pause:3.1
registry.k8s.io/pause:latest
-- /stdout --
I1217 19:33:41.239930 267684 cache_images.go:86] Images are preloaded, skipping loading
I1217 19:33:41.239939 267684 kubeadm.go:935] updating node { 192.168.39.22 8441 v1.35.0-rc.1 docker true true} ...
I1217 19:33:41.240072 267684 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.35.0-rc.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=functional-240388 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.39.22
[Install]
config:
{KubernetesVersion:v1.35.0-rc.1 ClusterName:functional-240388 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1217 19:33:41.240179 267684 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1217 19:33:41.426914 267684 extraconfig.go:125] Overwriting default enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota with user provided enable-admission-plugins=NamespaceAutoProvision for component apiserver
I1217 19:33:41.426938 267684 cni.go:84] Creating CNI manager for ""
I1217 19:33:41.426957 267684 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1217 19:33:41.426971 267684 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1217 19:33:41.426995 267684 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.39.22 APIServerPort:8441 KubernetesVersion:v1.35.0-rc.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-240388 NodeName:functional-240388 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceAutoProvision] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.39.22"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.39.22 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1217 19:33:41.427126 267684 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.39.22
bindPort: 8441
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "functional-240388"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.39.22"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.39.22"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceAutoProvision"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8441
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.35.0-rc.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
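The kubeadm config rendered above is a four-document YAML stream (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration) that the later `kubeadm init phase ...` commands all consume from /var/tmp/minikube/kubeadm.yaml. As a quick illustration only (assumed local file path, stdlib only, not part of minikube), a Go sketch that splits such a stream and reports each document's kind:
```go
// Hypothetical sketch: split a multi-document kubeadm.yaml and print each
// document's kind, using only the standard library.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/var/tmp/minikube/kubeadm.yaml") // assumed path from the log above
	if err != nil {
		panic(err)
	}
	defer f.Close()

	doc := 1
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		switch {
		case line == "---":
			doc++ // YAML document separator
		case strings.HasPrefix(line, "kind:"):
			fmt.Printf("document %d: %s\n", doc, strings.TrimSpace(strings.TrimPrefix(line, "kind:")))
		}
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
}
```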
I1217 19:33:41.427217 267684 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.35.0-rc.1
I1217 19:33:41.453808 267684 binaries.go:51] Found k8s binaries, skipping transfer
I1217 19:33:41.453878 267684 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1217 19:33:41.474935 267684 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (322 bytes)
I1217 19:33:41.530294 267684 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (357 bytes)
I1217 19:33:41.601303 267684 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2076 bytes)
I1217 19:33:41.724558 267684 ssh_runner.go:195] Run: grep 192.168.39.22 control-plane.minikube.internal$ /etc/hosts
I1217 19:33:41.735088 267684 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1217 19:33:42.073369 267684 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1217 19:33:42.122754 267684 certs.go:69] Setting up /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-240388 for IP: 192.168.39.22
I1217 19:33:42.122769 267684 certs.go:195] generating shared ca certs ...
I1217 19:33:42.122787 267684 certs.go:227] acquiring lock for ca certs: {Name:mk41d44cf7495c219db6c5af86332dabe9b164c0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1217 19:33:42.122952 267684 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22186-255930/.minikube/ca.key
I1217 19:33:42.122986 267684 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22186-255930/.minikube/proxy-client-ca.key
I1217 19:33:42.122993 267684 certs.go:257] generating profile certs ...
I1217 19:33:42.123066 267684 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-240388/client.key
I1217 19:33:42.123140 267684 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-240388/apiserver.key.69fe0bcf
I1217 19:33:42.123174 267684 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-240388/proxy-client.key
I1217 19:33:42.123282 267684 certs.go:484] found cert: /home/jenkins/minikube-integration/22186-255930/.minikube/certs/259985.pem (1338 bytes)
W1217 19:33:42.123309 267684 certs.go:480] ignoring /home/jenkins/minikube-integration/22186-255930/.minikube/certs/259985_empty.pem, impossibly tiny 0 bytes
I1217 19:33:42.123314 267684 certs.go:484] found cert: /home/jenkins/minikube-integration/22186-255930/.minikube/certs/ca-key.pem (1675 bytes)
I1217 19:33:42.123336 267684 certs.go:484] found cert: /home/jenkins/minikube-integration/22186-255930/.minikube/certs/ca.pem (1082 bytes)
I1217 19:33:42.123355 267684 certs.go:484] found cert: /home/jenkins/minikube-integration/22186-255930/.minikube/certs/cert.pem (1123 bytes)
I1217 19:33:42.123374 267684 certs.go:484] found cert: /home/jenkins/minikube-integration/22186-255930/.minikube/certs/key.pem (1675 bytes)
I1217 19:33:42.123410 267684 certs.go:484] found cert: /home/jenkins/minikube-integration/22186-255930/.minikube/files/etc/ssl/certs/2599852.pem (1708 bytes)
I1217 19:33:42.123979 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1217 19:33:42.258305 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1217 19:33:42.322504 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1217 19:33:42.483405 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1217 19:33:42.625002 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-240388/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1217 19:33:42.692761 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-240388/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1217 19:33:42.745086 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-240388/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1217 19:33:42.793780 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/profiles/functional-240388/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1217 19:33:42.841690 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/files/etc/ssl/certs/2599852.pem --> /usr/share/ca-certificates/2599852.pem (1708 bytes)
I1217 19:33:42.891346 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1217 19:33:42.933420 267684 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22186-255930/.minikube/certs/259985.pem --> /usr/share/ca-certificates/259985.pem (1338 bytes)
I1217 19:33:42.962368 267684 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1217 19:33:42.984665 267684 ssh_runner.go:195] Run: openssl version
I1217 19:33:42.992262 267684 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/259985.pem
I1217 19:33:43.005810 267684 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/259985.pem /etc/ssl/certs/259985.pem
I1217 19:33:43.028391 267684 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/259985.pem
I1217 19:33:43.034079 267684 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Dec 17 19:31 /usr/share/ca-certificates/259985.pem
I1217 19:33:43.034138 267684 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/259985.pem
I1217 19:33:43.041611 267684 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/51391683.0
I1217 19:33:43.053628 267684 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/2599852.pem
I1217 19:33:43.065540 267684 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/2599852.pem /etc/ssl/certs/2599852.pem
I1217 19:33:43.076857 267684 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2599852.pem
I1217 19:33:43.082352 267684 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Dec 17 19:31 /usr/share/ca-certificates/2599852.pem
I1217 19:33:43.082399 267684 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2599852.pem
I1217 19:33:43.089508 267684 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/3ec20f2e.0
I1217 19:33:43.101894 267684 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/minikubeCA.pem
I1217 19:33:43.113900 267684 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem
I1217 19:33:43.126463 267684 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1217 19:33:43.131738 267684 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 17 19:20 /usr/share/ca-certificates/minikubeCA.pem
I1217 19:33:43.131806 267684 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1217 19:33:43.139221 267684 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/b5213941.0
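The `openssl x509 -hash -noout` / `sudo test -L /etc/ssl/certs/<hash>.0` pairs above wire each PEM into the system trust store: the file is linked under /usr/share/ca-certificates and a symlink named after its OpenSSL subject hash (e.g. b5213941.0 for minikubeCA.pem) is expected under /etc/ssl/certs. A hedged Go sketch of that pattern (hypothetical, requires root, paths taken from the log; real code follows `ln -fs` semantics):
```go
// Hypothetical sketch: compute a certificate's OpenSSL subject hash and
// create the /etc/ssl/certs/<hash>.0 symlink the log checks for.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	pem := "/usr/share/ca-certificates/minikubeCA.pem" // assumed path from the log above

	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pem).Output()
	if err != nil {
		panic(err)
	}
	hash := strings.TrimSpace(string(out)) // e.g. "b5213941"

	link := "/etc/ssl/certs/" + hash + ".0"
	_ = os.Remove(link) // os.Symlink fails if the link already exists
	if err := os.Symlink(pem, link); err != nil {
		panic(err)
	}
	fmt.Println("linked", link, "->", pem)
}
```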
I1217 19:33:43.151081 267684 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1217 19:33:43.156496 267684 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1217 19:33:43.163542 267684 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1217 19:33:43.171156 267684 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1217 19:33:43.179401 267684 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1217 19:33:43.187058 267684 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1217 19:33:43.194355 267684 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
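The `openssl x509 -noout -in <cert> -checkend 86400` runs verify that each existing control-plane certificate remains valid for at least another 24 hours before it is reused. The same check can be done natively; a small sketch using crypto/x509 (assumed certificate path from the log, illustration only):
```go
// Hypothetical sketch: the native equivalent of
// `openssl x509 -noout -in <cert> -checkend 86400`.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	path := "/var/lib/minikube/certs/apiserver-kubelet-client.crt" // assumed path from the log above
	data, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// -checkend 86400: fail if the cert expires within the next 24 hours.
	if time.Now().Add(24 * time.Hour).After(cert.NotAfter) {
		fmt.Println("certificate expires within 24h, not after:", cert.NotAfter)
		os.Exit(1)
	}
	fmt.Println("certificate valid past 24h, not after:", cert.NotAfter)
}
```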
I1217 19:33:43.201350 267684 kubeadm.go:401] StartCluster: {Name:functional-240388 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/22186/minikube-v1.37.0-1765965980-22186-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1765966054-22186@sha256:1c173489767e6632c410d2554f1a2272f032a423dd528157e201daadfe3c43f0 Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0-rc.1 ClusterName:functional-240388 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.22 Port:8441 KubernetesVersion:v1.35.0-rc.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1217 19:33:43.201471 267684 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1217 19:33:43.218924 267684 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1217 19:33:43.230913 267684 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1217 19:33:43.230923 267684 kubeadm.go:598] restartPrimaryControlPlane start ...
I1217 19:33:43.230973 267684 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1217 19:33:43.242368 267684 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1217 19:33:43.242866 267684 kubeconfig.go:125] found "functional-240388" server: "https://192.168.39.22:8441"
I1217 19:33:43.243982 267684 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1217 19:33:43.254672 267684 kubeadm.go:645] detected kubeadm config drift (will reconfigure cluster from new /var/tmp/minikube/kubeadm.yaml):
-- stdout --
--- /var/tmp/minikube/kubeadm.yaml
+++ /var/tmp/minikube/kubeadm.yaml.new
@@ -24,7 +24,7 @@
certSANs: ["127.0.0.1", "localhost", "192.168.39.22"]
extraArgs:
- name: "enable-admission-plugins"
- value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
+ value: "NamespaceAutoProvision"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
-- /stdout --
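The drift detection above is simply `diff -u` over the current and freshly rendered kubeadm.yaml: exit status 0 means no change, 1 means the files differ and the cluster is reconfigured from the new file (here the only change is the admission-plugins value). A minimal Go sketch of interpreting that exit status (hypothetical, simplified, paths from the log):
```go
// Hypothetical sketch: detect kubeadm config drift by diffing the current
// and newly rendered config, as the log above does over SSH.
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("diff", "-u",
		"/var/tmp/minikube/kubeadm.yaml",
		"/var/tmp/minikube/kubeadm.yaml.new")
	out, err := cmd.CombinedOutput()

	var exitErr *exec.ExitError
	switch {
	case err == nil:
		fmt.Println("no drift, keeping existing config")
	case errors.As(err, &exitErr) && exitErr.ExitCode() == 1:
		// diff exits 1 when the files differ; the diff body shows what changed.
		fmt.Println("config drift detected, will reconfigure:")
		fmt.Print(string(out))
	default:
		panic(err) // exit code 2 or another failure (e.g. missing file)
	}
}
```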
I1217 19:33:43.254681 267684 kubeadm.go:1161] stopping kube-system containers ...
I1217 19:33:43.254745 267684 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1217 19:33:43.275954 267684 docker.go:484] Stopping containers: [a17ea1c89782 8512c4ee5234 6a5939c9502d 1acc72667ffe 9cac3f35da9f 719551455fba 192352b42712 67f00add7f90 cf40faa6d26b 2eee0e13328f 16980b72586c ca0a9d83ba38 9b648a420d5f 4af0a786a194 287f41e5445c bb348be6d197 148616d57564 2a94d92ddfbf a7ce08614779 7a5020f70312 9fca1633c22a 54deab0a7e37 bfcb4221d4a7 7b61216a620a 5179f7af9585 70c515d79623 de691c17fad0 c9f3d097d04d bc39802d6918 1c77d49437e1 4953e0b7245e]
I1217 19:33:43.276067 267684 ssh_runner.go:195] Run: docker stop a17ea1c89782 8512c4ee5234 6a5939c9502d 1acc72667ffe 9cac3f35da9f 719551455fba 192352b42712 67f00add7f90 cf40faa6d26b 2eee0e13328f 16980b72586c ca0a9d83ba38 9b648a420d5f 4af0a786a194 287f41e5445c bb348be6d197 148616d57564 2a94d92ddfbf a7ce08614779 7a5020f70312 9fca1633c22a 54deab0a7e37 bfcb4221d4a7 7b61216a620a 5179f7af9585 70c515d79623 de691c17fad0 c9f3d097d04d bc39802d6918 1c77d49437e1 4953e0b7245e
I1217 19:33:43.624025 267684 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1217 19:33:43.676356 267684 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1217 19:33:43.689348 267684 kubeadm.go:158] found existing configuration files:
-rw------- 1 root root 5635 Dec 17 19:31 /etc/kubernetes/admin.conf
-rw------- 1 root root 5637 Dec 17 19:32 /etc/kubernetes/controller-manager.conf
-rw------- 1 root root 5677 Dec 17 19:32 /etc/kubernetes/kubelet.conf
-rw------- 1 root root 5585 Dec 17 19:32 /etc/kubernetes/scheduler.conf
I1217 19:33:43.689427 267684 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I1217 19:33:43.700899 267684 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I1217 19:33:43.712072 267684 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf: Process exited with status 1
stdout:
stderr:
I1217 19:33:43.712160 267684 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1217 19:33:43.724140 267684 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I1217 19:33:43.735067 267684 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 1
stdout:
stderr:
I1217 19:33:43.735130 267684 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1217 19:33:43.746444 267684 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I1217 19:33:43.757136 267684 kubeadm.go:164] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 1
stdout:
stderr:
I1217 19:33:43.757188 267684 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1217 19:33:43.768689 267684 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1217 19:33:43.780126 267684 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1217 19:33:43.830635 267684 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1217 19:33:44.318201 267684 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1217 19:33:44.586483 267684 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1217 19:33:44.644239 267684 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I1217 19:33:44.723984 267684 api_server.go:52] waiting for apiserver process to appear ...
I1217 19:33:44.724052 267684 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1217 19:33:45.225085 267684 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1217 19:33:45.725212 267684 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1217 19:33:46.225040 267684 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1217 19:33:46.263588 267684 api_server.go:72] duration metric: took 1.539620704s to wait for apiserver process to appear ...
I1217 19:33:46.263624 267684 api_server.go:88] waiting for apiserver healthz status ...
I1217 19:33:46.263642 267684 api_server.go:253] Checking apiserver healthz at https://192.168.39.22:8441/healthz ...
I1217 19:33:48.060178 267684 api_server.go:279] https://192.168.39.22:8441/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W1217 19:33:48.060199 267684 api_server.go:103] status: https://192.168.39.22:8441/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I1217 19:33:48.060212 267684 api_server.go:253] Checking apiserver healthz at https://192.168.39.22:8441/healthz ...
I1217 19:33:48.082825 267684 api_server.go:279] https://192.168.39.22:8441/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W1217 19:33:48.082853 267684 api_server.go:103] status: https://192.168.39.22:8441/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I1217 19:33:48.264243 267684 api_server.go:253] Checking apiserver healthz at https://192.168.39.22:8441/healthz ...
I1217 19:33:48.270123 267684 api_server.go:279] https://192.168.39.22:8441/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[-]poststarthook/start-kube-apiserver-identity-lease-controller failed: reason withheld
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1217 19:33:48.270140 267684 api_server.go:103] status: https://192.168.39.22:8441/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[-]poststarthook/start-kube-apiserver-identity-lease-controller failed: reason withheld
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1217 19:33:48.763703 267684 api_server.go:253] Checking apiserver healthz at https://192.168.39.22:8441/healthz ...
I1217 19:33:48.771293 267684 api_server.go:279] https://192.168.39.22:8441/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1217 19:33:48.771313 267684 api_server.go:103] status: https://192.168.39.22:8441/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1217 19:33:49.263841 267684 api_server.go:253] Checking apiserver healthz at https://192.168.39.22:8441/healthz ...
I1217 19:33:49.285040 267684 api_server.go:279] https://192.168.39.22:8441/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1217 19:33:49.285061 267684 api_server.go:103] status: https://192.168.39.22:8441/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1217 19:33:49.764778 267684 api_server.go:253] Checking apiserver healthz at https://192.168.39.22:8441/healthz ...
I1217 19:33:49.771574 267684 api_server.go:279] https://192.168.39.22:8441/healthz returned 200:
ok
I1217 19:33:49.779794 267684 api_server.go:141] control plane version: v1.35.0-rc.1
I1217 19:33:49.779830 267684 api_server.go:131] duration metric: took 3.516200098s to wait for apiserver health ...
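The healthz loop above keeps GETing https://192.168.39.22:8441/healthz until it returns 200: the early 403s come back while the request is still treated as anonymous (system:anonymous may not read /healthz), and the 500s list which post-start hooks have not yet finished. A rough Go sketch of such a poller (hypothetical; TLS verification is skipped here purely for brevity, whereas a real client would trust the cluster CA):
```go
// Hypothetical sketch: poll the apiserver /healthz endpoint until it
// reports 200 OK or the deadline passes.
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second,
		// Assumption for brevity: skip cert verification; real code should
		// put the minikube CA into tls.Config RootCAs instead.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}

	deadline := time.Now().Add(2 * time.Minute)
	for time.Now().Before(deadline) {
		resp, err := client.Get("https://192.168.39.22:8441/healthz")
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Println("apiserver healthy:", string(body))
				return
			}
			fmt.Printf("healthz returned %d, retrying\n", resp.StatusCode)
		} else {
			fmt.Println("healthz request failed, retrying:", err)
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("timed out waiting for apiserver health")
}
```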
I1217 19:33:49.779839 267684 cni.go:84] Creating CNI manager for ""
I1217 19:33:49.779849 267684 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1217 19:33:49.781831 267684 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1217 19:33:49.783461 267684 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1217 19:33:49.809372 267684 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I1217 19:33:49.861811 267684 system_pods.go:43] waiting for kube-system pods to appear ...
I1217 19:33:49.866819 267684 system_pods.go:59] 7 kube-system pods found
I1217 19:33:49.866868 267684 system_pods.go:61] "coredns-7d764666f9-p2jc7" [463dfe4a-5f2b-4d8b-969f-3288b215bcba] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1217 19:33:49.866878 267684 system_pods.go:61] "etcd-functional-240388" [b25d5f2b-38a8-43f6-a9ca-650e1080eddf] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1217 19:33:49.866884 267684 system_pods.go:61] "kube-apiserver-functional-240388" [f6453f94-5276-4e95-9449-699193d4b24c] Pending
I1217 19:33:49.866893 267684 system_pods.go:61] "kube-controller-manager-functional-240388" [0582fe42-e649-424f-8850-7fbbffcaa22e] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1217 19:33:49.866903 267684 system_pods.go:61] "kube-proxy-9b4xt" [74afb855-c8bc-4697-ae99-f445db36b930] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1217 19:33:49.866909 267684 system_pods.go:61] "kube-scheduler-functional-240388" [40e6e45c-16f3-41e6-81ea-3e8b63efbd54] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1217 19:33:49.866913 267684 system_pods.go:61] "storage-provisioner" [377236c5-a7a8-4bb5-834d-3140d3393035] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1217 19:33:49.866920 267684 system_pods.go:74] duration metric: took 5.094555ms to wait for pod list to return data ...
I1217 19:33:49.866929 267684 node_conditions.go:102] verifying NodePressure condition ...
I1217 19:33:49.872686 267684 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1217 19:33:49.872705 267684 node_conditions.go:123] node cpu capacity is 2
I1217 19:33:49.872722 267684 node_conditions.go:105] duration metric: took 5.787969ms to run NodePressure ...
I1217 19:33:49.872783 267684 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0-rc.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I1217 19:33:50.179374 267684 kubeadm.go:729] waiting for restarted kubelet to initialise ...
I1217 19:33:50.182281 267684 kubeadm.go:744] kubelet initialised
I1217 19:33:50.182292 267684 kubeadm.go:745] duration metric: took 2.903208ms waiting for restarted kubelet to initialise ...
I1217 19:33:50.182307 267684 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1217 19:33:50.201867 267684 ops.go:34] apiserver oom_adj: -16
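Reading /proc/$(pgrep kube-apiserver)/oom_adj confirms the apiserver runs with a strongly negative OOM adjustment (-16 here), so the kernel prefers to kill other processes first under memory pressure. A small sketch of a similar check (hypothetical; oom_adj is the legacy interface the log reads, newer kernels expose oom_score_adj):
```go
// Hypothetical sketch: find the kube-apiserver PID and read its OOM
// adjustment from /proc, similar to the shell pipeline in the log above.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("pgrep", "-xn", "kube-apiserver").Output()
	if err != nil {
		panic(err) // no matching process
	}
	pid := strings.TrimSpace(string(out))

	adj, err := os.ReadFile("/proc/" + pid + "/oom_adj")
	if err != nil {
		panic(err)
	}
	fmt.Printf("kube-apiserver pid %s oom_adj %s\n", pid, strings.TrimSpace(string(adj)))
}
```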
I1217 19:33:50.201881 267684 kubeadm.go:602] duration metric: took 6.970952997s to restartPrimaryControlPlane
I1217 19:33:50.201892 267684 kubeadm.go:403] duration metric: took 7.000554069s to StartCluster
I1217 19:33:50.201919 267684 settings.go:142] acquiring lock: {Name:mk9bce2c5cb192383c5c2d74365fff53c608cc17 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1217 19:33:50.202011 267684 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/22186-255930/kubeconfig
I1217 19:33:50.203049 267684 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22186-255930/kubeconfig: {Name:mk8f63919c382cf8d5b565d23aa50d046bd25197 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1217 19:33:50.203354 267684 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.39.22 Port:8441 KubernetesVersion:v1.35.0-rc.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1217 19:33:50.203438 267684 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1217 19:33:50.203528 267684 addons.go:70] Setting storage-provisioner=true in profile "functional-240388"
I1217 19:33:50.203546 267684 addons.go:239] Setting addon storage-provisioner=true in "functional-240388"
W1217 19:33:50.203553 267684 addons.go:248] addon storage-provisioner should already be in state true
I1217 19:33:50.203581 267684 host.go:66] Checking if "functional-240388" exists ...
I1217 19:33:50.203581 267684 config.go:182] Loaded profile config "functional-240388": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.35.0-rc.1
I1217 19:33:50.203574 267684 addons.go:70] Setting default-storageclass=true in profile "functional-240388"
I1217 19:33:50.203616 267684 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "functional-240388"
I1217 19:33:50.206087 267684 addons.go:239] Setting addon default-storageclass=true in "functional-240388"
W1217 19:33:50.206095 267684 addons.go:248] addon default-storageclass should already be in state true
I1217 19:33:50.206113 267684 host.go:66] Checking if "functional-240388" exists ...
I1217 19:33:50.207362 267684 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1217 19:33:50.207371 267684 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1217 19:33:50.209580 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:50.209815 267684 out.go:179] * Verifying Kubernetes components...
I1217 19:33:50.209822 267684 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1217 19:33:50.209953 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:50.209970 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:50.210109 267684 sshutil.go:53] new ssh client: &{IP:192.168.39.22 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22186-255930/.minikube/machines/functional-240388/id_rsa Username:docker}
I1217 19:33:50.211031 267684 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1217 19:33:50.211042 267684 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1217 19:33:50.211049 267684 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1217 19:33:50.213015 267684 main.go:143] libmachine: domain functional-240388 has defined MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:50.213326 267684 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:f3:98:a3", ip: ""} in network mk-functional-240388: {Iface:virbr1 ExpiryTime:2025-12-17 20:31:23 +0000 UTC Type:0 Mac:52:54:00:f3:98:a3 Iaid: IPaddr:192.168.39.22 Prefix:24 Hostname:functional-240388 Clientid:01:52:54:00:f3:98:a3}
I1217 19:33:50.213338 267684 main.go:143] libmachine: domain functional-240388 has defined IP address 192.168.39.22 and MAC address 52:54:00:f3:98:a3 in network mk-functional-240388
I1217 19:33:50.213455 267684 sshutil.go:53] new ssh client: &{IP:192.168.39.22 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/22186-255930/.minikube/machines/functional-240388/id_rsa Username:docker}
I1217 19:33:50.474258 267684 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1217 19:33:50.505661 267684 node_ready.go:35] waiting up to 6m0s for node "functional-240388" to be "Ready" ...
I1217 19:33:50.509464 267684 node_ready.go:49] node "functional-240388" is "Ready"
I1217 19:33:50.509481 267684 node_ready.go:38] duration metric: took 3.795113ms for node "functional-240388" to be "Ready" ...
I1217 19:33:50.509497 267684 api_server.go:52] waiting for apiserver process to appear ...
I1217 19:33:50.509549 267684 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1217 19:33:50.534536 267684 api_server.go:72] duration metric: took 331.145216ms to wait for apiserver process to appear ...
I1217 19:33:50.534563 267684 api_server.go:88] waiting for apiserver healthz status ...
I1217 19:33:50.534581 267684 api_server.go:253] Checking apiserver healthz at https://192.168.39.22:8441/healthz ...
I1217 19:33:50.548651 267684 api_server.go:279] https://192.168.39.22:8441/healthz returned 200:
ok
I1217 19:33:50.550644 267684 api_server.go:141] control plane version: v1.35.0-rc.1
I1217 19:33:50.550659 267684 api_server.go:131] duration metric: took 16.091059ms to wait for apiserver health ...
I1217 19:33:50.550667 267684 system_pods.go:43] waiting for kube-system pods to appear ...
I1217 19:33:50.565804 267684 system_pods.go:59] 7 kube-system pods found
I1217 19:33:50.565825 267684 system_pods.go:61] "coredns-7d764666f9-p2jc7" [463dfe4a-5f2b-4d8b-969f-3288b215bcba] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1217 19:33:50.565830 267684 system_pods.go:61] "etcd-functional-240388" [b25d5f2b-38a8-43f6-a9ca-650e1080eddf] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1217 19:33:50.565834 267684 system_pods.go:61] "kube-apiserver-functional-240388" [f6453f94-5276-4e95-9449-699193d4b24c] Pending
I1217 19:33:50.565838 267684 system_pods.go:61] "kube-controller-manager-functional-240388" [0582fe42-e649-424f-8850-7fbbffcaa22e] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1217 19:33:50.565842 267684 system_pods.go:61] "kube-proxy-9b4xt" [74afb855-c8bc-4697-ae99-f445db36b930] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1217 19:33:50.565846 267684 system_pods.go:61] "kube-scheduler-functional-240388" [40e6e45c-16f3-41e6-81ea-3e8b63efbd54] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1217 19:33:50.565850 267684 system_pods.go:61] "storage-provisioner" [377236c5-a7a8-4bb5-834d-3140d3393035] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1217 19:33:50.565855 267684 system_pods.go:74] duration metric: took 15.183886ms to wait for pod list to return data ...
I1217 19:33:50.565862 267684 default_sa.go:34] waiting for default service account to be created ...
I1217 19:33:50.570724 267684 default_sa.go:45] found service account: "default"
I1217 19:33:50.570738 267684 default_sa.go:55] duration metric: took 4.870957ms for default service account to be created ...
I1217 19:33:50.570746 267684 system_pods.go:116] waiting for k8s-apps to be running ...
I1217 19:33:50.574791 267684 system_pods.go:86] 7 kube-system pods found
I1217 19:33:50.574808 267684 system_pods.go:89] "coredns-7d764666f9-p2jc7" [463dfe4a-5f2b-4d8b-969f-3288b215bcba] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1217 19:33:50.574815 267684 system_pods.go:89] "etcd-functional-240388" [b25d5f2b-38a8-43f6-a9ca-650e1080eddf] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1217 19:33:50.574820 267684 system_pods.go:89] "kube-apiserver-functional-240388" [f6453f94-5276-4e95-9449-699193d4b24c] Pending
I1217 19:33:50.574825 267684 system_pods.go:89] "kube-controller-manager-functional-240388" [0582fe42-e649-424f-8850-7fbbffcaa22e] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1217 19:33:50.574829 267684 system_pods.go:89] "kube-proxy-9b4xt" [74afb855-c8bc-4697-ae99-f445db36b930] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1217 19:33:50.574833 267684 system_pods.go:89] "kube-scheduler-functional-240388" [40e6e45c-16f3-41e6-81ea-3e8b63efbd54] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1217 19:33:50.574836 267684 system_pods.go:89] "storage-provisioner" [377236c5-a7a8-4bb5-834d-3140d3393035] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1217 19:33:50.574857 267684 retry.go:31] will retry after 274.467854ms: missing components: kube-apiserver
I1217 19:33:50.586184 267684 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.35.0-rc.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1217 19:33:50.600769 267684 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.35.0-rc.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1217 19:33:50.854208 267684 system_pods.go:86] 7 kube-system pods found
I1217 19:33:50.854237 267684 system_pods.go:89] "coredns-7d764666f9-p2jc7" [463dfe4a-5f2b-4d8b-969f-3288b215bcba] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1217 19:33:50.854243 267684 system_pods.go:89] "etcd-functional-240388" [b25d5f2b-38a8-43f6-a9ca-650e1080eddf] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1217 19:33:50.854253 267684 system_pods.go:89] "kube-apiserver-functional-240388" [f6453f94-5276-4e95-9449-699193d4b24c] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1217 19:33:50.854261 267684 system_pods.go:89] "kube-controller-manager-functional-240388" [0582fe42-e649-424f-8850-7fbbffcaa22e] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1217 19:33:50.854267 267684 system_pods.go:89] "kube-proxy-9b4xt" [74afb855-c8bc-4697-ae99-f445db36b930] Running
I1217 19:33:50.854274 267684 system_pods.go:89] "kube-scheduler-functional-240388" [40e6e45c-16f3-41e6-81ea-3e8b63efbd54] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1217 19:33:50.854278 267684 system_pods.go:89] "storage-provisioner" [377236c5-a7a8-4bb5-834d-3140d3393035] Running
I1217 19:33:50.854286 267684 system_pods.go:126] duration metric: took 283.535465ms to wait for k8s-apps to be running ...
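Between the two pod listings above the wait retried after ~274ms because kube-apiserver was still Pending; once every expected component reports Running, the k8s-apps wait completes. A generic retry loop in that spirit (stdlib only; the condition function below is a stand-in, not minikube's implementation):
```go
// Hypothetical sketch: retry a readiness check with a capped backoff until
// it succeeds or the overall timeout expires.
package main

import (
	"errors"
	"fmt"
	"time"
)

// retryUntil repeatedly calls check until it returns nil or timeout elapses.
func retryUntil(timeout, initialDelay time.Duration, check func() error) error {
	deadline := time.Now().Add(timeout)
	delay := initialDelay
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Now().Add(delay).After(deadline) {
			return fmt.Errorf("timed out: last error: %w", err)
		}
		fmt.Printf("will retry after %v: %v\n", delay, err)
		time.Sleep(delay)
		if delay *= 2; delay > 5*time.Second {
			delay = 5 * time.Second
		}
	}
}

func main() {
	attempts := 0
	err := retryUntil(30*time.Second, 250*time.Millisecond, func() error {
		attempts++
		if attempts < 3 { // stand-in for "is kube-apiserver Running yet?"
			return errors.New("missing components: kube-apiserver")
		}
		return nil
	})
	fmt.Println("result:", err)
}
```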
I1217 19:33:50.854295 267684 system_svc.go:44] waiting for kubelet service to be running ....
I1217 19:33:50.854411 267684 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1217 19:33:51.435751 267684 system_svc.go:56] duration metric: took 581.446ms WaitForService to wait for kubelet
I1217 19:33:51.435770 267684 kubeadm.go:587] duration metric: took 1.23238848s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1217 19:33:51.435787 267684 node_conditions.go:102] verifying NodePressure condition ...
I1217 19:33:51.439212 267684 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1217 19:33:51.439232 267684 node_conditions.go:123] node cpu capacity is 2
I1217 19:33:51.439249 267684 node_conditions.go:105] duration metric: took 3.454979ms to run NodePressure ...
I1217 19:33:51.439262 267684 start.go:242] waiting for startup goroutines ...
I1217 19:33:51.444332 267684 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I1217 19:33:51.445791 267684 addons.go:530] duration metric: took 1.242367945s for enable addons: enabled=[storage-provisioner default-storageclass]
I1217 19:33:51.445830 267684 start.go:247] waiting for cluster config update ...
I1217 19:33:51.445844 267684 start.go:256] writing updated cluster config ...
I1217 19:33:51.446209 267684 ssh_runner.go:195] Run: rm -f paused
I1217 19:33:51.452231 267684 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1217 19:33:51.457011 267684 pod_ready.go:83] waiting for pod "coredns-7d764666f9-p2jc7" in "kube-system" namespace to be "Ready" or be gone ...
W1217 19:33:53.464153 267684 pod_ready.go:104] pod "coredns-7d764666f9-p2jc7" is not "Ready", error: <nil>
W1217 19:33:55.962739 267684 pod_ready.go:104] pod "coredns-7d764666f9-p2jc7" is not "Ready", error: <nil>
I1217 19:33:57.974056 267684 pod_ready.go:94] pod "coredns-7d764666f9-p2jc7" is "Ready"
I1217 19:33:57.974098 267684 pod_ready.go:86] duration metric: took 6.517047688s for pod "coredns-7d764666f9-p2jc7" in "kube-system" namespace to be "Ready" or be gone ...
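A pod counts as "Ready" in these waits once its PodReady condition is True; the coredns pod above took ~6.5s to get there after the restart. A client-go sketch of that check (hypothetical; assumes KUBECONFIG points at this profile and the modules k8s.io/client-go and k8s.io/api are available; the pod name is taken from the log):
```go
// Hypothetical sketch: check whether a kube-system pod has the PodReady
// condition set to True, the same notion of "Ready" the log waits on.
package main

import (
	"context"
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func podReady(pod *corev1.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReady {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	name := "coredns-7d764666f9-p2jc7" // pod name taken from the log above
	pod, err := client.CoreV1().Pods("kube-system").Get(context.Background(), name, metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("pod %s ready=%v phase=%s\n", pod.Name, podReady(pod), pod.Status.Phase)
}
```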
I1217 19:33:57.977362 267684 pod_ready.go:83] waiting for pod "etcd-functional-240388" in "kube-system" namespace to be "Ready" or be gone ...
I1217 19:33:59.983912 267684 pod_ready.go:94] pod "etcd-functional-240388" is "Ready"
I1217 19:33:59.983929 267684 pod_ready.go:86] duration metric: took 2.00655136s for pod "etcd-functional-240388" in "kube-system" namespace to be "Ready" or be gone ...
I1217 19:33:59.986470 267684 pod_ready.go:83] waiting for pod "kube-apiserver-functional-240388" in "kube-system" namespace to be "Ready" or be gone ...
I1217 19:33:59.990980 267684 pod_ready.go:94] pod "kube-apiserver-functional-240388" is "Ready"
I1217 19:33:59.991000 267684 pod_ready.go:86] duration metric: took 4.511047ms for pod "kube-apiserver-functional-240388" in "kube-system" namespace to be "Ready" or be gone ...
I1217 19:33:59.993459 267684 pod_ready.go:83] waiting for pod "kube-controller-manager-functional-240388" in "kube-system" namespace to be "Ready" or be gone ...
I1217 19:33:59.997937 267684 pod_ready.go:94] pod "kube-controller-manager-functional-240388" is "Ready"
I1217 19:33:59.997954 267684 pod_ready.go:86] duration metric: took 4.482221ms for pod "kube-controller-manager-functional-240388" in "kube-system" namespace to be "Ready" or be gone ...
I1217 19:34:00.000145 267684 pod_ready.go:83] waiting for pod "kube-proxy-9b4xt" in "kube-system" namespace to be "Ready" or be gone ...
I1217 19:34:00.361563 267684 pod_ready.go:94] pod "kube-proxy-9b4xt" is "Ready"
I1217 19:34:00.361586 267684 pod_ready.go:86] duration metric: took 361.42797ms for pod "kube-proxy-9b4xt" in "kube-system" namespace to be "Ready" or be gone ...
I1217 19:34:00.561139 267684 pod_ready.go:83] waiting for pod "kube-scheduler-functional-240388" in "kube-system" namespace to be "Ready" or be gone ...
W1217 19:34:02.566440 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:04.567079 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:06.568433 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:09.069006 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:11.568213 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:14.067100 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:16.067895 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:18.068093 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:20.568800 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:23.067669 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:25.068181 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:27.069140 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:29.567682 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:32.067944 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:34.567970 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:36.568410 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:38.568875 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:41.067285 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:43.067582 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:45.068991 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:47.568211 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:50.067798 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:52.567110 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:55.068867 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:57.567142 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:34:59.567565 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:02.067195 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:04.067302 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:06.068868 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:08.568773 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:11.067299 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:13.067770 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:15.567220 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:17.567959 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:20.067787 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:22.068724 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:24.568343 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:26.568770 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:29.067710 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:31.068145 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:33.568905 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:36.067452 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:38.567405 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:41.067223 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:43.068236 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:45.566660 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:47.568023 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:50.067628 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:52.067691 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:54.566681 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:56.567070 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:35:58.567205 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:00.567585 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:03.066911 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:05.067697 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:07.567267 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:10.070065 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:12.567282 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:15.067336 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:17.067923 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:19.068236 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:21.567784 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:23.568193 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:26.068627 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:28.568621 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:31.067696 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:33.067753 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:35.567389 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:37.568393 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:40.067868 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:42.068526 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:44.568754 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:47.066209 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:49.067317 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:51.067920 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:53.067951 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:55.568233 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:36:58.067178 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:00.568182 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:02.568983 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:05.068318 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:07.567541 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:10.068359 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:12.567929 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:15.067934 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:17.568019 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:19.568147 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:22.067853 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:24.568939 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:27.067623 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:29.068086 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:31.068905 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:33.567497 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:35.571222 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:38.068303 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:40.566716 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:42.567116 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:45.066812 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:47.069637 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
W1217 19:37:49.567275 267684 pod_ready.go:104] pod "kube-scheduler-functional-240388" is not "Ready", error: <nil>
I1217 19:37:51.452903 267684 pod_ready.go:86] duration metric: took 3m50.891734106s for pod "kube-scheduler-functional-240388" in "kube-system" namespace to be "Ready" or be gone ...
W1217 19:37:51.452931 267684 pod_ready.go:65] not all pods in "kube-system" namespace with "component=kube-scheduler" label are "Ready", will retry: waitPodCondition: context deadline exceeded
I1217 19:37:51.452945 267684 pod_ready.go:40] duration metric: took 4m0.000689077s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1217 19:37:51.455139 267684 out.go:203]
W1217 19:37:51.456702 267684 out.go:285] X Exiting due to GUEST_START: extra waiting: WaitExtra: context deadline exceeded
I1217 19:37:51.457988 267684 out.go:203]
==> Docker <==
Dec 17 19:33:43 functional-240388 dockerd[7924]: time="2025-12-17T19:33:43.473198163Z" level=info msg="ignoring event" container=6a5939c9502dbcd502681138b069c09eae18b9d8938bb170983b90cf6c57d283 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 17 19:33:44 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:44Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-p2jc7_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"1acc72667ffecb9b1f9c1462cd1503c88f603f0996e69135f49a6c923e49ea3e\""
Dec 17 19:33:45 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:45Z" level=info msg="Both sandbox container and checkpoint could not be found with id \"bfcb4221d4a7ef30dae2a66cd597250594cae53eecf1156b58fc897c3db4adb2\". Proceed without further sandbox information."
Dec 17 19:33:45 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:45Z" level=info msg="Both sandbox container and checkpoint could not be found with id \"bc39802d69185bb3fe4387bc88803990af8a467199a407d8ff3139edd897ad31\". Proceed without further sandbox information."
Dec 17 19:33:45 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:45Z" level=info msg="Both sandbox container and checkpoint could not be found with id \"de691c17fad0893ec8eda10a693c32635b6669ce118c79ef0ba02c521d246106\". Proceed without further sandbox information."
Dec 17 19:33:45 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:45Z" level=info msg="Both sandbox container and checkpoint could not be found with id \"a7ce08614779dcde6425e02aa30ac8921f91945d930f48ac401c8f72dfd73c97\". Proceed without further sandbox information."
Dec 17 19:33:45 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:45Z" level=info msg="Both sandbox container and checkpoint could not be found with id \"c9f3d097d04d9a9046d428a65af3812c3ab56cc0bffba2a9ad0f66a88bfc4afa\". Proceed without further sandbox information."
Dec 17 19:33:45 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:45Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/99980b89e2f75344098f5ed8a2c5d8550ce5ecb31a633fbe4a58540f6365e83f/resolv.conf as [nameserver 192.168.122.1]"
Dec 17 19:33:45 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:45Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b52517caf11d99d0e709e8bb4270cba47fe414e0d2abdc432798bc350b43f823/resolv.conf as [nameserver 192.168.122.1]"
Dec 17 19:33:45 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:45Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/2eee46eecde457c617f877978293fdd4990ba0015ac72f57962f009d7372900e/resolv.conf as [nameserver 192.168.122.1]"
Dec 17 19:33:45 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:45Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/c56608b9e4727946ea7e1d159d57918d83a65898cb2bbe22e2de60e264d7921a/resolv.conf as [nameserver 192.168.122.1]"
Dec 17 19:33:45 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:45Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-7d764666f9-p2jc7_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"1acc72667ffecb9b1f9c1462cd1503c88f603f0996e69135f49a6c923e49ea3e\""
Dec 17 19:33:46 functional-240388 dockerd[7924]: time="2025-12-17T19:33:46.635036806Z" level=info msg="ignoring event" container=9ddcfd03def09fa056533c5127dfcbdc2e222868d94c39bba44f3d1b1c432fdb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 17 19:33:48 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:48Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Dec 17 19:33:49 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a51d46f16da861c5c8c4221920bfa125a4f956a42f6e7194de3b5d72ba8aa080/resolv.conf as [nameserver 192.168.122.1]"
Dec 17 19:33:49 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/3c1a68a5a63f99b2b353b54a2893bc8ba4f54b12c5da116a989ab7f96dcc78cb/resolv.conf as [nameserver 192.168.122.1]"
Dec 17 19:33:49 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:33:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/28fbe3560cc6992430231caf18aa8e1adaa0a1b5412dbfe4e46274db26a4284a/resolv.conf as [nameserver 192.168.122.1]"
Dec 17 19:33:58 functional-240388 dockerd[7924]: time="2025-12-17T19:33:58.156995422Z" level=info msg="ignoring event" container=df58efeab0f3eac3bed6fbf1084e04f8510bc53762ed05d42fdf793c4585f427 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 17 19:34:25 functional-240388 dockerd[7924]: time="2025-12-17T19:34:25.215727670Z" level=info msg="ignoring event" container=d78244b593944cd7e0e4f16b9b888f48bc7129b768d4e5e0bf62b079bec06dce module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 17 19:34:54 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:34:54Z" level=error msg="error getting RW layer size for container ID '287f41e5445c4de7ef61e7b0c9e3722323849e5b526ad5d9226062c00d21909b': Error response from daemon: No such container: 287f41e5445c4de7ef61e7b0c9e3722323849e5b526ad5d9226062c00d21909b"
Dec 17 19:34:54 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:34:54Z" level=error msg="Set backoffDuration to : 1m0s for container ID '287f41e5445c4de7ef61e7b0c9e3722323849e5b526ad5d9226062c00d21909b'"
Dec 17 19:35:16 functional-240388 dockerd[7924]: time="2025-12-17T19:35:16.200016889Z" level=info msg="ignoring event" container=bda7b0ce1a09acea20f60556583a189d78353284e5aa024fe014450268259e70 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 17 19:35:24 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:35:24Z" level=error msg="error getting RW layer size for container ID 'd78244b593944cd7e0e4f16b9b888f48bc7129b768d4e5e0bf62b079bec06dce': Error response from daemon: No such container: d78244b593944cd7e0e4f16b9b888f48bc7129b768d4e5e0bf62b079bec06dce"
Dec 17 19:35:24 functional-240388 cri-dockerd[8838]: time="2025-12-17T19:35:24Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'd78244b593944cd7e0e4f16b9b888f48bc7129b768d4e5e0bf62b079bec06dce'"
Dec 17 19:36:55 functional-240388 dockerd[7924]: time="2025-12-17T19:36:55.174600498Z" level=info msg="ignoring event" container=c389d6e9b8b25fe06ee328f3917cf450d84ad5662adb95ce9d71dd903fa1b18d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
==> container status <==
CONTAINER        IMAGE            CREATED           STATE     NAME                        ATTEMPT   POD ID           POD                                          NAMESPACE
c389d6e9b8b25    73f80cdc073da    58 seconds ago    Exited    kube-scheduler              8         c56608b9e4727    kube-scheduler-functional-240388             kube-system
d3cd2a6021efd    af0321f3a4f38    4 minutes ago     Running   kube-proxy                  4         28fbe3560cc69    kube-proxy-9b4xt                             kube-system
bbeee7ea3c766    6e38f40d628db    4 minutes ago     Running   storage-provisioner         3         3c1a68a5a63f9    storage-provisioner                          kube-system
720ef2e2e28ba    aa5e3ebc0dfed    4 minutes ago     Running   coredns                     3         a51d46f16da86    coredns-7d764666f9-p2jc7                     kube-system
060eef1436e87    5032a56602e1b    4 minutes ago     Running   kube-controller-manager     4         2eee46eecde45    kube-controller-manager-functional-240388    kube-system
7ed9aced06540    0a108f7189562    4 minutes ago     Running   etcd                        3         b52517caf11d9    etcd-functional-240388                       kube-system
5e0236591f856    58865405a13bc    4 minutes ago     Running   kube-apiserver              0         99980b89e2f75    kube-apiserver-functional-240388             kube-system
8512c4ee52340    af0321f3a4f38    4 minutes ago     Exited    kube-proxy                  3         cf40faa6d26bc    kube-proxy-9b4xt                             kube-system
6a5939c9502db    5032a56602e1b    4 minutes ago     Exited    kube-controller-manager     3         67f00add7f903    kube-controller-manager-functional-240388    kube-system
2eee0e13328f5    aa5e3ebc0dfed    5 minutes ago     Exited    coredns                     2         16980b72586cd    coredns-7d764666f9-p2jc7                     kube-system
9b648a420d5f2    6e38f40d628db    5 minutes ago     Exited    storage-provisioner         2         9fca1633c22ad    storage-provisioner                          kube-system
bb348be6d197a    0a108f7189562    5 minutes ago     Exited    etcd                        2         2a94d92ddfbf2    etcd-functional-240388                       kube-system
==> coredns [2eee0e13328f] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 680cec097987c24242735352e9de77b2ba657caea131666c4002607b6f81fb6322fe6fa5c2d434be3fcd1251845cd6b7641e3a08a7d3b88486730de31a010646
CoreDNS-1.13.1
linux/amd64, go1.25.2, 1db4568
[INFO] 127.0.0.1:38482 - 26281 "HINFO IN 7082336438510137172.6908441513580825570. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.027199885s
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [720ef2e2e28b] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 680cec097987c24242735352e9de77b2ba657caea131666c4002607b6f81fb6322fe6fa5c2d434be3fcd1251845cd6b7641e3a08a7d3b88486730de31a010646
CoreDNS-1.13.1
linux/amd64, go1.25.2, 1db4568
[INFO] 127.0.0.1:60164 - 54627 "HINFO IN 8701000521322517761.9006979715444964387. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.118925994s
==> describe nodes <==
Name: functional-240388
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=functional-240388
kubernetes.io/os=linux
minikube.k8s.io/commit=2e96f676eb7e96389e85fe0658a4ede4c4ba6924
minikube.k8s.io/name=functional-240388
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_12_17T19_31_43_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Wed, 17 Dec 2025 19:31:39 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: functional-240388
AcquireTime: <unset>
RenewTime: Wed, 17 Dec 2025 19:37:43 +0000
Conditions:
Type              Status   LastHeartbeatTime                  LastTransitionTime                 Reason                        Message
----              ------   -----------------                  ------------------                 ------                        -------
MemoryPressure    False    Wed, 17 Dec 2025 19:33:48 +0000    Wed, 17 Dec 2025 19:31:38 +0000    KubeletHasSufficientMemory    kubelet has sufficient memory available
DiskPressure      False    Wed, 17 Dec 2025 19:33:48 +0000    Wed, 17 Dec 2025 19:31:38 +0000    KubeletHasNoDiskPressure      kubelet has no disk pressure
PIDPressure       False    Wed, 17 Dec 2025 19:33:48 +0000    Wed, 17 Dec 2025 19:31:38 +0000    KubeletHasSufficientPID       kubelet has sufficient PID available
Ready             True     Wed, 17 Dec 2025 19:33:48 +0000    Wed, 17 Dec 2025 19:31:48 +0000    KubeletReady                  kubelet is posting ready status
Addresses:
InternalIP: 192.168.39.22
Hostname: functional-240388
Capacity:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 4001788Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 4001788Ki
pods: 110
System Info:
Machine ID: ca73804dacda4148bcecb3c8c2b68c32
System UUID: ca73804d-acda-4148-bcec-b3c8c2b68c32
Boot ID: 23b31f3c-6ff1-49c8-bae1-b3e21418a3ce
Kernel Version: 6.6.95
OS Image: Buildroot 2025.02
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.5.2
Kubelet Version: v1.35.0-rc.1
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (7 in total)
Namespace      Name                                          CPU Requests    CPU Limits    Memory Requests    Memory Limits    Age
---------      ----                                          ------------    ----------    ---------------    -------------    ---
kube-system    coredns-7d764666f9-p2jc7                      100m (5%)       0 (0%)        70Mi (1%)          170Mi (4%)       6m5s
kube-system    etcd-functional-240388                        100m (5%)       0 (0%)        100Mi (2%)         0 (0%)           6m12s
kube-system    kube-apiserver-functional-240388              250m (12%)      0 (0%)        0 (0%)             0 (0%)           4m3s
kube-system    kube-controller-manager-functional-240388     200m (10%)      0 (0%)        0 (0%)             0 (0%)           6m12s
kube-system    kube-proxy-9b4xt                              0 (0%)          0 (0%)        0 (0%)             0 (0%)           6m5s
kube-system    kube-scheduler-functional-240388              100m (5%)       0 (0%)        0 (0%)             0 (0%)           6m12s
kube-system    storage-provisioner                           0 (0%)          0 (0%)        0 (0%)             0 (0%)           6m4s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource             Requests      Limits
--------             --------      ------
cpu                  750m (37%)    0 (0%)
memory               170Mi (4%)    170Mi (4%)
ephemeral-storage    0 (0%)        0 (0%)
hugepages-2Mi        0 (0%)        0 (0%)
Events:
Type      Reason            Age     From               Message
----      ------            ----    ----               -------
Normal    RegisteredNode    6m6s    node-controller    Node functional-240388 event: Registered Node functional-240388 in Controller
Normal    RegisteredNode    5m8s    node-controller    Node functional-240388 event: Registered Node functional-240388 in Controller
Normal    RegisteredNode    4m1s    node-controller    Node functional-240388 event: Registered Node functional-240388 in Controller
==> dmesg <==
[ +0.280021] kauditd_printk_skb: 29 callbacks suppressed
[ +0.109752] kauditd_printk_skb: 345 callbacks suppressed
[ +0.098255] kauditd_printk_skb: 205 callbacks suppressed
[ +0.156272] kauditd_printk_skb: 165 callbacks suppressed
[ +0.603143] kauditd_printk_skb: 18 callbacks suppressed
[ +0.025033] kauditd_printk_skb: 219 callbacks suppressed
[Dec17 19:32] kauditd_printk_skb: 12 callbacks suppressed
[ +5.510593] kauditd_printk_skb: 22 callbacks suppressed
[ +0.045349] kauditd_printk_skb: 56 callbacks suppressed
[ +2.055200] kauditd_printk_skb: 400 callbacks suppressed
[ +0.128571] kauditd_printk_skb: 107 callbacks suppressed
[ +1.610657] kauditd_printk_skb: 150 callbacks suppressed
[Dec17 19:33] kauditd_printk_skb: 2 callbacks suppressed
[ +20.173189] kauditd_printk_skb: 12 callbacks suppressed
[ +0.324441] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
[ +0.000004] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
[ +0.006613] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
[ +0.000004] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
[ +5.161832] kauditd_printk_skb: 21 callbacks suppressed
[ +0.933223] kauditd_printk_skb: 410 callbacks suppressed
[ +0.335071] kauditd_printk_skb: 228 callbacks suppressed
[ +6.261034] kauditd_printk_skb: 2 callbacks suppressed
[Dec17 19:34] kauditd_printk_skb: 34 callbacks suppressed
[Dec17 19:35] kauditd_printk_skb: 6 callbacks suppressed
[Dec17 19:36] kauditd_printk_skb: 6 callbacks suppressed
==> etcd [7ed9aced0654] <==
{"level":"info","ts":"2025-12-17T19:33:46.239077Z","caller":"membership/cluster.go:433","msg":"ignore already added member","cluster-id":"eaed0234649c774e","local-member-id":"cde0bb267fc4e559","added-peer-id":"cde0bb267fc4e559","added-peer-peer-urls":["https://192.168.39.22:2380"],"added-peer-is-learner":false}
{"level":"info","ts":"2025-12-17T19:33:46.239215Z","caller":"membership/cluster.go:674","msg":"updated cluster version","cluster-id":"eaed0234649c774e","local-member-id":"cde0bb267fc4e559","from":"3.6","to":"3.6"}
{"level":"info","ts":"2025-12-17T19:33:46.235081Z","caller":"embed/etcd.go:766","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2025-12-17T19:33:46.235098Z","caller":"embed/etcd.go:640","msg":"serving peer traffic","address":"192.168.39.22:2380"}
{"level":"info","ts":"2025-12-17T19:33:46.239296Z","caller":"embed/etcd.go:611","msg":"cmux::serve","address":"192.168.39.22:2380"}
{"level":"info","ts":"2025-12-17T19:33:46.245673Z","caller":"embed/etcd.go:292","msg":"now serving peer/client/metrics","local-member-id":"cde0bb267fc4e559","initial-advertise-peer-urls":["https://192.168.39.22:2380"],"listen-peer-urls":["https://192.168.39.22:2380"],"advertise-client-urls":["https://192.168.39.22:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.22:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2025-12-17T19:33:46.245695Z","caller":"embed/etcd.go:890","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2025-12-17T19:33:46.806159Z","logger":"raft","caller":"v3@v3.6.0/raft.go:988","msg":"cde0bb267fc4e559 is starting a new election at term 4"}
{"level":"info","ts":"2025-12-17T19:33:46.806211Z","logger":"raft","caller":"v3@v3.6.0/raft.go:930","msg":"cde0bb267fc4e559 became pre-candidate at term 4"}
{"level":"info","ts":"2025-12-17T19:33:46.806249Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cde0bb267fc4e559 received MsgPreVoteResp from cde0bb267fc4e559 at term 4"}
{"level":"info","ts":"2025-12-17T19:33:46.806259Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cde0bb267fc4e559 has received 1 MsgPreVoteResp votes and 0 vote rejections"}
{"level":"info","ts":"2025-12-17T19:33:46.806272Z","logger":"raft","caller":"v3@v3.6.0/raft.go:912","msg":"cde0bb267fc4e559 became candidate at term 5"}
{"level":"info","ts":"2025-12-17T19:33:46.808150Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1077","msg":"cde0bb267fc4e559 received MsgVoteResp from cde0bb267fc4e559 at term 5"}
{"level":"info","ts":"2025-12-17T19:33:46.808241Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1693","msg":"cde0bb267fc4e559 has received 1 MsgVoteResp votes and 0 vote rejections"}
{"level":"info","ts":"2025-12-17T19:33:46.808263Z","logger":"raft","caller":"v3@v3.6.0/raft.go:970","msg":"cde0bb267fc4e559 became leader at term 5"}
{"level":"info","ts":"2025-12-17T19:33:46.808271Z","logger":"raft","caller":"v3@v3.6.0/node.go:370","msg":"raft.node: cde0bb267fc4e559 elected leader cde0bb267fc4e559 at term 5"}
{"level":"info","ts":"2025-12-17T19:33:46.811093Z","caller":"etcdserver/server.go:1820","msg":"published local member to cluster through raft","local-member-id":"cde0bb267fc4e559","local-member-attributes":"{Name:functional-240388 ClientURLs:[https://192.168.39.22:2379]}","cluster-id":"eaed0234649c774e","publish-timeout":"7s"}
{"level":"info","ts":"2025-12-17T19:33:46.811141Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-12-17T19:33:46.811333Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-12-17T19:33:46.812913Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-12-17T19:33:46.813647Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-12-17T19:33:46.814214Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-12-17T19:33:46.814326Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-12-17T19:33:46.815065Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-12-17T19:33:46.815938Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.22:2379"}
==> etcd [bb348be6d197] <==
{"level":"info","ts":"2025-12-17T19:32:39.703026Z","caller":"embed/serve.go:138","msg":"ready to serve client requests"}
{"level":"info","ts":"2025-12-17T19:32:39.704425Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2025-12-17T19:32:39.704604Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2025-12-17T19:32:39.705307Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-12-17T19:32:39.706437Z","caller":"v3rpc/health.go:63","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2025-12-17T19:32:39.709945Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.22:2379"}
{"level":"info","ts":"2025-12-17T19:32:39.711194Z","caller":"embed/serve.go:283","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2025-12-17T19:33:29.523469Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
{"level":"info","ts":"2025-12-17T19:33:29.523619Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"functional-240388","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.22:2380"],"advertise-client-urls":["https://192.168.39.22:2379"]}
{"level":"error","ts":"2025-12-17T19:33:29.524018Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-12-17T19:33:36.526356Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-12-17T19:33:36.535569Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-17T19:33:36.535641Z","caller":"etcdserver/server.go:1297","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"cde0bb267fc4e559","current-leader-member-id":"cde0bb267fc4e559"}
{"level":"info","ts":"2025-12-17T19:33:36.535804Z","caller":"etcdserver/server.go:2335","msg":"server has stopped; stopping cluster version's monitor"}
{"level":"info","ts":"2025-12-17T19:33:36.535839Z","caller":"etcdserver/server.go:2358","msg":"server has stopped; stopping storage version's monitor"}
{"level":"warn","ts":"2025-12-17T19:33:36.536005Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"warn","ts":"2025-12-17T19:33:36.536165Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"error","ts":"2025-12-17T19:33:36.536303Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"warn","ts":"2025-12-17T19:33:36.536584Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.39.22:2379: use of closed network connection"}
{"level":"warn","ts":"2025-12-17T19:33:36.536670Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.39.22:2379: use of closed network connection"}
{"level":"error","ts":"2025-12-17T19:33:36.536756Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.22:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-17T19:33:36.540108Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.39.22:2380"}
{"level":"error","ts":"2025-12-17T19:33:36.540158Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.39.22:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-12-17T19:33:36.540212Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.39.22:2380"}
{"level":"info","ts":"2025-12-17T19:33:36.540220Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"functional-240388","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.22:2380"],"advertise-client-urls":["https://192.168.39.22:2379"]}
==> kernel <==
19:37:52 up 6 min, 0 users, load average: 0.23, 0.54, 0.32
Linux functional-240388 6.6.95 #1 SMP PREEMPT_DYNAMIC Wed Dec 17 12:49:57 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2025.02"
==> kube-apiserver [5e0236591f85] <==
I1217 19:33:48.193986 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:48.195040 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister"
I1217 19:33:48.195204 1 aggregator.go:187] initial CRD sync complete...
I1217 19:33:48.195297 1 autoregister_controller.go:144] Starting autoregister controller
I1217 19:33:48.195312 1 cache.go:32] Waiting for caches to sync for autoregister controller
I1217 19:33:48.195348 1 cache.go:39] Caches are synced for autoregister controller
I1217 19:33:48.196861 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1217 19:33:48.196978 1 cache.go:39] Caches are synced for LocalAvailability controller
I1217 19:33:48.199599 1 cache.go:39] Caches are synced for RemoteAvailability controller
I1217 19:33:48.199859 1 apf_controller.go:382] Running API Priority and Fairness config worker
I1217 19:33:48.199994 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process
I1217 19:33:48.203065 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:48.203116 1 policy_source.go:248] refreshing policies
E1217 19:33:48.203778 1 controller.go:97] Error removing old endpoints from kubernetes service: no API server IP addresses were listed in storage, refusing to erase all endpoints for the kubernetes Service
I1217 19:33:48.205324 1 handler_discovery.go:451] Starting ResourceDiscoveryManager
I1217 19:33:48.285472 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io
I1217 19:33:48.708639 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I1217 19:33:49.014823 1 storage_scheduling.go:139] all system priority classes are created successfully or already exist.
I1217 19:33:50.032084 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1217 19:33:50.096678 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I1217 19:33:50.147339 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1217 19:33:50.159097 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1217 19:33:51.583557 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1217 19:33:51.633623 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I1217 19:33:51.785119 1 controller.go:667] quota admission added evaluator for: endpoints
==> kube-controller-manager [060eef1436e8] <==
I1217 19:33:51.312115 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.312158 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.312172 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.312197 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.312243 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.317055 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.317108 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.317196 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.317219 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.317261 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.320834 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.326367 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.327452 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.327578 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.327778 1 range_allocator.go:177] "Sending events to api server"
I1217 19:33:51.327884 1 range_allocator.go:181] "Starting range CIDR allocator"
I1217 19:33:51.328405 1 shared_informer.go:370] "Waiting for caches to sync"
I1217 19:33:51.328621 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.329112 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.344055 1 shared_informer.go:370] "Waiting for caches to sync"
I1217 19:33:51.367478 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.413161 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:51.413193 1 garbagecollector.go:166] "Garbage collector: all resource monitors have synced"
I1217 19:33:51.413198 1 garbagecollector.go:169] "Proceeding to collect garbage"
I1217 19:33:51.444447 1 shared_informer.go:377] "Caches are synced"
==> kube-controller-manager [6a5939c9502d] <==
I1217 19:33:42.779068 1 serving.go:386] Generated self-signed cert in-memory
I1217 19:33:42.803815 1 controllermanager.go:189] "Starting" version="v1.35.0-rc.1"
I1217 19:33:42.805416 1 controllermanager.go:191] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1217 19:33:42.809073 1 secure_serving.go:211] Serving securely on 127.0.0.1:10257
I1217 19:33:42.809222 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1217 19:33:42.809956 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1217 19:33:42.811439 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
==> kube-proxy [8512c4ee5234] <==
I1217 19:33:42.844139 1 server_linux.go:53] "Using iptables proxy"
I1217 19:33:42.908637 1 shared_informer.go:370] "Waiting for caches to sync"
==> kube-proxy [d3cd2a6021ef] <==
I1217 19:33:50.115201 1 shared_informer.go:370] "Waiting for caches to sync"
I1217 19:33:50.219503 1 shared_informer.go:377] "Caches are synced"
I1217 19:33:50.219553 1 server.go:218] "Successfully retrieved NodeIPs" NodeIPs=["192.168.39.22"]
E1217 19:33:50.219660 1 server.go:255] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1217 19:33:50.358153 1 server_linux.go:107] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1217 19:33:50.358275 1 server.go:266] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1217 19:33:50.358324 1 server_linux.go:136] "Using iptables Proxier"
I1217 19:33:50.370513 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1217 19:33:50.370948 1 server.go:529] "Version info" version="v1.35.0-rc.1"
I1217 19:33:50.371342 1 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1217 19:33:50.382699 1 config.go:200] "Starting service config controller"
I1217 19:33:50.382951 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1217 19:33:50.382986 1 config.go:106] "Starting endpoint slice config controller"
I1217 19:33:50.383135 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1217 19:33:50.383323 1 config.go:403] "Starting serviceCIDR config controller"
I1217 19:33:50.383345 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1217 19:33:50.398739 1 config.go:309] "Starting node config controller"
I1217 19:33:50.398767 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1217 19:33:50.398774 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1217 19:33:50.484896 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1217 19:33:50.484936 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1217 19:33:50.484967 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
==> kube-scheduler [c389d6e9b8b2] <==
I1217 19:36:55.152302 1 serving.go:386] Generated self-signed cert in-memory
E1217 19:36:55.157564 1 run.go:72] "command failed" err="failed to create listener: failed to listen on 127.0.0.1:10259: listen tcp 127.0.0.1:10259: bind: address already in use"
==> kubelet <==
Dec 17 19:36:29 functional-240388 kubelet[10024]: E1217 19:36:29.722751 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-240388" containerName="etcd"
Dec 17 19:36:30 functional-240388 kubelet[10024]: E1217 19:36:30.722813 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-240388" containerName="kube-controller-manager"
Dec 17 19:36:33 functional-240388 kubelet[10024]: E1217 19:36:33.723188 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-240388" containerName="kube-apiserver"
Dec 17 19:36:44 functional-240388 kubelet[10024]: E1217 19:36:44.723669 10024 prober_manager.go:209] "Readiness probe already exists for container" pod="kube-system/coredns-7d764666f9-p2jc7" containerName="coredns"
Dec 17 19:36:54 functional-240388 kubelet[10024]: E1217 19:36:54.722966 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-240388" containerName="kube-scheduler"
Dec 17 19:36:54 functional-240388 kubelet[10024]: I1217 19:36:54.723058 10024 scope.go:122] "RemoveContainer" containerID="bda7b0ce1a09acea20f60556583a189d78353284e5aa024fe014450268259e70"
Dec 17 19:36:55 functional-240388 kubelet[10024]: E1217 19:36:55.227238 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-240388" containerName="kube-scheduler"
Dec 17 19:36:55 functional-240388 kubelet[10024]: I1217 19:36:55.227265 10024 scope.go:122] "RemoveContainer" containerID="c389d6e9b8b25fe06ee328f3917cf450d84ad5662adb95ce9d71dd903fa1b18d"
Dec 17 19:36:55 functional-240388 kubelet[10024]: E1217 19:36:55.227456 10024 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-scheduler pod=kube-scheduler-functional-240388_kube-system(a7752d693a1b0ca5f7f99c49d4c4d9a3)\"" pod="kube-system/kube-scheduler-functional-240388" podUID="a7752d693a1b0ca5f7f99c49d4c4d9a3"
Dec 17 19:36:56 functional-240388 kubelet[10024]: I1217 19:36:56.250990 10024 scope.go:122] "RemoveContainer" containerID="bda7b0ce1a09acea20f60556583a189d78353284e5aa024fe014450268259e70"
Dec 17 19:36:56 functional-240388 kubelet[10024]: E1217 19:36:56.251196 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-240388" containerName="kube-scheduler"
Dec 17 19:36:56 functional-240388 kubelet[10024]: I1217 19:36:56.251218 10024 scope.go:122] "RemoveContainer" containerID="c389d6e9b8b25fe06ee328f3917cf450d84ad5662adb95ce9d71dd903fa1b18d"
Dec 17 19:36:56 functional-240388 kubelet[10024]: E1217 19:36:56.252554 10024 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-scheduler pod=kube-scheduler-functional-240388_kube-system(a7752d693a1b0ca5f7f99c49d4c4d9a3)\"" pod="kube-system/kube-scheduler-functional-240388" podUID="a7752d693a1b0ca5f7f99c49d4c4d9a3"
Dec 17 19:36:57 functional-240388 kubelet[10024]: E1217 19:36:57.266982 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-240388" containerName="kube-scheduler"
Dec 17 19:36:57 functional-240388 kubelet[10024]: I1217 19:36:57.267020 10024 scope.go:122] "RemoveContainer" containerID="c389d6e9b8b25fe06ee328f3917cf450d84ad5662adb95ce9d71dd903fa1b18d"
Dec 17 19:36:57 functional-240388 kubelet[10024]: E1217 19:36:57.267174 10024 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-scheduler pod=kube-scheduler-functional-240388_kube-system(a7752d693a1b0ca5f7f99c49d4c4d9a3)\"" pod="kube-system/kube-scheduler-functional-240388" podUID="a7752d693a1b0ca5f7f99c49d4c4d9a3"
Dec 17 19:36:59 functional-240388 kubelet[10024]: E1217 19:36:59.912508 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-240388" containerName="kube-scheduler"
Dec 17 19:36:59 functional-240388 kubelet[10024]: I1217 19:36:59.912547 10024 scope.go:122] "RemoveContainer" containerID="c389d6e9b8b25fe06ee328f3917cf450d84ad5662adb95ce9d71dd903fa1b18d"
Dec 17 19:36:59 functional-240388 kubelet[10024]: E1217 19:36:59.912725 10024 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-scheduler pod=kube-scheduler-functional-240388_kube-system(a7752d693a1b0ca5f7f99c49d4c4d9a3)\"" pod="kube-system/kube-scheduler-functional-240388" podUID="a7752d693a1b0ca5f7f99c49d4c4d9a3"
Dec 17 19:37:04 functional-240388 kubelet[10024]: E1217 19:37:04.157154 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-scheduler-functional-240388" containerName="kube-scheduler"
Dec 17 19:37:04 functional-240388 kubelet[10024]: I1217 19:37:04.157203 10024 scope.go:122] "RemoveContainer" containerID="c389d6e9b8b25fe06ee328f3917cf450d84ad5662adb95ce9d71dd903fa1b18d"
Dec 17 19:37:04 functional-240388 kubelet[10024]: E1217 19:37:04.157445 10024 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=kube-scheduler pod=kube-scheduler-functional-240388_kube-system(a7752d693a1b0ca5f7f99c49d4c4d9a3)\"" pod="kube-system/kube-scheduler-functional-240388" podUID="a7752d693a1b0ca5f7f99c49d4c4d9a3"
Dec 17 19:37:34 functional-240388 kubelet[10024]: E1217 19:37:34.724113 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-apiserver-functional-240388" containerName="kube-apiserver"
Dec 17 19:37:38 functional-240388 kubelet[10024]: E1217 19:37:38.723737 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/kube-controller-manager-functional-240388" containerName="kube-controller-manager"
Dec 17 19:37:50 functional-240388 kubelet[10024]: E1217 19:37:50.723446 10024 prober_manager.go:197] "Startup probe already exists for container" pod="kube-system/etcd-functional-240388" containerName="etcd"
==> storage-provisioner [9b648a420d5f] <==
W1217 19:33:03.593135 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:05.598686 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:05.604262 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:07.608150 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:07.613505 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:09.616685 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:09.625987 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:11.628971 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:11.634196 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:13.638095 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:13.650817 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:15.653822 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:15.659165 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:17.662761 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:17.671275 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:19.675136 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:19.680125 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:21.685015 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:21.693876 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:23.697502 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:23.702567 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:25.706350 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:25.714991 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:27.719834 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:33:27.724945 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
==> storage-provisioner [bbeee7ea3c76] <==
W1217 19:37:28.460304 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:30.464230 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:30.469876 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:32.472633 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:32.481327 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:34.484750 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:34.490191 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:36.493922 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:36.502058 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:38.505748 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:38.510948 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:40.514429 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:40.522307 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:42.531030 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:42.538013 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:44.541135 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:44.550203 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:46.554820 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:46.561173 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:48.564580 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:48.569437 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:50.572296 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:50.581205 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:52.586052 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1217 19:37:52.592816 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
-- /stdout --
helpers_test.go:263: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p functional-240388 -n functional-240388
helpers_test.go:270: (dbg) Run: kubectl --context functional-240388 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:294: <<< TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/ExtraConfig FAILED: end of post-mortem logs <<<
helpers_test.go:295: ---------------------/post-mortem---------------------------------
--- FAIL: TestFunctionalNewestKubernetes/Versionv1.35.0-rc.1/serial/ExtraConfig (286.88s)
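The captured logs above are consistent with a single proximate cause: the restarted kube-scheduler container (c389d6e9b8b2) exits immediately with "failed to create listener: failed to listen on 127.0.0.1:10259: bind: address already in use", kubelet then holds it in CrashLoopBackOff ("back-off 2m40s restarting failed container=kube-scheduler"), the pod never reports Ready, and the 4m0s extra wait times out with GUEST_START. A minimal triage sketch follows; the profile and pod names are copied from this log, while command availability inside the guest (for example ss) is an assumption, not something the test verified.

  # Hypothetical follow-up, assuming the functional-240388 profile is still running.
  # Check the crash-looping scheduler pod and read the previous container's log.
  kubectl --context functional-240388 -n kube-system get pod kube-scheduler-functional-240388 -o wide
  kubectl --context functional-240388 -n kube-system logs kube-scheduler-functional-240388 --previous
  # Inside the guest, list scheduler containers and look for whatever already holds 127.0.0.1:10259
  # (substitute netstat if ss is not present in the guest image).
  out/minikube-linux-amd64 -p functional-240388 ssh -- "docker ps -a --filter name=kube-scheduler"
  out/minikube-linux-amd64 -p functional-240388 ssh -- "sudo ss -ltnp | grep 10259"

If a stale scheduler process or container left over from the apiserver restart is still bound to 10259, removing it (or restarting the kubelet so the static pod is recreated cleanly) should let the new container start and the readiness wait succeed.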