=== RUN TestStartStop/group/no-preload/serial/Pause
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 pause -p no-preload-019660 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Done: out/minikube-linux-amd64 pause -p no-preload-019660 --alsologtostderr -v=1: (1.571604429s)
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-019660 -n no-preload-019660
E1123 08:57:40.923908 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:43.933855 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:45.921635 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:45.928126 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:45.939747 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:45.961269 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:46.002876 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:46.084366 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:46.245911 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:46.567695 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:47.209951 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:48.491430 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-019660 -n no-preload-019660: exit status 2 (15.763002847s)
-- stdout --
Stopped
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: post-pause apiserver status = "Stopped"; want = "Paused"
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-019660 -n no-preload-019660
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-019660 -n no-preload-019660: exit status 2 (15.764590369s)
-- stdout --
Stopped
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 unpause -p no-preload-019660 --alsologtostderr -v=1
E1123 08:58:06.417197 22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Done: out/minikube-linux-amd64 unpause -p no-preload-019660 --alsologtostderr -v=1: (1.01628798s)
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-019660 -n no-preload-019660
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-019660 -n no-preload-019660
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-019660 -n no-preload-019660
helpers_test.go:252: <<< TestStartStop/group/no-preload/serial/Pause FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p no-preload-019660 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p no-preload-019660 logs -n 25: (1.823722377s)
helpers_test.go:260: TestStartStop/group/no-preload/serial/Pause logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ start │ -p old-k8s-version-896471 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=kvm2 --kubernetes-version=v1.28.0 │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ addons │ enable metrics-server -p no-preload-019660 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ stop │ -p no-preload-019660 --alsologtostderr -v=3 │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ addons │ enable dashboard -p no-preload-019660 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ start │ -p no-preload-019660 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=kvm2 --kubernetes-version=v1.34.1 │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:57 UTC │
│ addons │ enable metrics-server -p embed-certs-059363 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ stop │ -p embed-certs-059363 --alsologtostderr -v=3 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ addons │ enable dashboard -p embed-certs-059363 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ start │ -p embed-certs-059363 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=kvm2 --kubernetes-version=v1.34.1 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:57 UTC │
│ addons │ enable metrics-server -p default-k8s-diff-port-925051 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ stop │ -p default-k8s-diff-port-925051 --alsologtostderr -v=3 │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ image │ old-k8s-version-896471 image list --format=json │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ pause │ -p old-k8s-version-896471 --alsologtostderr -v=1 │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ unpause │ -p old-k8s-version-896471 --alsologtostderr -v=1 │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ delete │ -p old-k8s-version-896471 │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ delete │ -p old-k8s-version-896471 │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ start │ -p newest-cni-078196 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=kvm2 --kubernetes-version=v1.34.1 │ newest-cni-078196 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ │
│ addons │ enable dashboard -p default-k8s-diff-port-925051 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ start │ -p default-k8s-diff-port-925051 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=kvm2 --kubernetes-version=v1.34.1 │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ │
│ image │ no-preload-019660 image list --format=json │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ pause │ -p no-preload-019660 --alsologtostderr -v=1 │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ image │ embed-certs-059363 image list --format=json │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
│ pause │ -p embed-certs-059363 --alsologtostderr -v=1 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
│ unpause │ -p no-preload-019660 --alsologtostderr -v=1 │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
│ unpause │ -p embed-certs-059363 --alsologtostderr -v=1 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/23 08:57:16
Running on machine: ubuntu-20-agent-3
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1123 08:57:16.853497 62480 out.go:360] Setting OutFile to fd 1 ...
I1123 08:57:16.853743 62480 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:57:16.853753 62480 out.go:374] Setting ErrFile to fd 2...
I1123 08:57:16.853757 62480 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:57:16.854434 62480 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
I1123 08:57:16.855203 62480 out.go:368] Setting JSON to false
I1123 08:57:16.856605 62480 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":5986,"bootTime":1763882251,"procs":197,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1123 08:57:16.856696 62480 start.go:143] virtualization: kvm guest
I1123 08:57:16.935723 62480 out.go:179] * [default-k8s-diff-port-925051] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1123 08:57:16.941914 62480 out.go:179] - MINIKUBE_LOCATION=21966
I1123 08:57:16.941916 62480 notify.go:221] Checking for updates...
I1123 08:57:16.943817 62480 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1123 08:57:16.945573 62480 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
I1123 08:57:16.946745 62480 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
I1123 08:57:16.947938 62480 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1123 08:57:16.949027 62480 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1123 08:57:16.950511 62480 config.go:182] Loaded profile config "default-k8s-diff-port-925051": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:57:16.951037 62480 driver.go:422] Setting default libvirt URI to qemu:///system
I1123 08:57:16.994324 62480 out.go:179] * Using the kvm2 driver based on existing profile
I1123 08:57:16.995670 62480 start.go:309] selected driver: kvm2
I1123 08:57:16.995691 62480 start.go:927] validating driver "kvm2" against &{Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:57:16.995851 62480 start.go:938] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1123 08:57:16.997354 62480 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:57:16.997396 62480 cni.go:84] Creating CNI manager for ""
I1123 08:57:16.997466 62480 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:57:16.997521 62480 start.go:353] cluster config:
{Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:57:16.997662 62480 iso.go:125] acquiring lock: {Name:mk9cdb644d601a15f26caa6d527f7a63e06eb691 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:57:16.999287 62480 out.go:179] * Starting "default-k8s-diff-port-925051" primary control-plane node in "default-k8s-diff-port-925051" cluster
I1123 08:57:16.538965 62034 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:57:16.543216 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.543908 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.543934 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.544164 62034 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/config.json ...
I1123 08:57:16.544418 62034 machine.go:94] provisionDockerMachine start ...
I1123 08:57:16.547123 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.547583 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.547608 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.547766 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:16.547963 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:16.547972 62034 main.go:143] libmachine: About to run SSH command:
hostname
I1123 08:57:16.673771 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1123 08:57:16.673806 62034 buildroot.go:166] provisioning hostname "embed-certs-059363"
I1123 08:57:16.677167 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.677679 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.677711 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.677931 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:16.678192 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:16.678214 62034 main.go:143] libmachine: About to run SSH command:
sudo hostname embed-certs-059363 && echo "embed-certs-059363" | sudo tee /etc/hostname
I1123 08:57:16.832499 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: embed-certs-059363
I1123 08:57:16.837251 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.837813 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.837855 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.838109 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:16.838438 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:16.838465 62034 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sembed-certs-059363' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 embed-certs-059363/g' /etc/hosts;
else
echo '127.0.1.1 embed-certs-059363' | sudo tee -a /etc/hosts;
fi
fi
I1123 08:57:16.972318 62034 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:57:16.972350 62034 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
I1123 08:57:16.972374 62034 buildroot.go:174] setting up certificates
I1123 08:57:16.972395 62034 provision.go:84] configureAuth start
I1123 08:57:16.976994 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.977623 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.977662 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.980665 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.981134 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.981158 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.981351 62034 provision.go:143] copyHostCerts
I1123 08:57:16.981431 62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
I1123 08:57:16.981446 62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
I1123 08:57:16.981523 62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
I1123 08:57:16.981635 62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
I1123 08:57:16.981646 62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
I1123 08:57:16.981690 62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
I1123 08:57:16.981769 62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
I1123 08:57:16.981779 62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
I1123 08:57:16.981817 62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
I1123 08:57:16.981897 62034 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.embed-certs-059363 san=[127.0.0.1 192.168.72.170 embed-certs-059363 localhost minikube]
I1123 08:57:17.112794 62034 provision.go:177] copyRemoteCerts
I1123 08:57:17.112848 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 08:57:17.115853 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.116282 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:17.116308 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.116478 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:17.223809 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1123 08:57:17.266771 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1123 08:57:17.305976 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
I1123 08:57:17.336820 62034 provision.go:87] duration metric: took 364.408049ms to configureAuth
I1123 08:57:17.336863 62034 buildroot.go:189] setting minikube options for container-runtime
I1123 08:57:17.337080 62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:57:17.339671 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.340090 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:17.340112 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.340318 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:17.340623 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:17.340643 62034 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1123 08:57:17.463677 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1123 08:57:17.463707 62034 buildroot.go:70] root file system type: tmpfs
I1123 08:57:17.463928 62034 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1123 08:57:17.467227 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.467655 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:17.467686 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.467940 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:17.468174 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:17.468268 62034 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1123 08:57:17.602870 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1123 08:57:17.606541 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.607111 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:17.607152 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.607427 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:17.607698 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:17.607716 62034 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1123 08:57:19.186051 62386 start.go:364] duration metric: took 9.989286317s to acquireMachinesLock for "newest-cni-078196"
I1123 08:57:19.186120 62386 start.go:93] Provisioning new machine with config: &{Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1123 08:57:19.186215 62386 start.go:125] createHost starting for "" (driver="kvm2")
W1123 08:57:15.950255 61684 pod_ready.go:104] pod "coredns-66bc5c9577-nj6pk" is not "Ready", error: <nil>
W1123 08:57:17.951890 61684 pod_ready.go:104] pod "coredns-66bc5c9577-nj6pk" is not "Ready", error: <nil>
I1123 08:57:19.962419 61684 pod_ready.go:94] pod "coredns-66bc5c9577-nj6pk" is "Ready"
I1123 08:57:19.962449 61684 pod_ready.go:86] duration metric: took 8.021055049s for pod "coredns-66bc5c9577-nj6pk" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:19.967799 61684 pod_ready.go:83] waiting for pod "etcd-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:19.977812 61684 pod_ready.go:94] pod "etcd-no-preload-019660" is "Ready"
I1123 08:57:19.977834 61684 pod_ready.go:86] duration metric: took 10.013782ms for pod "etcd-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:19.982683 61684 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:19.992798 61684 pod_ready.go:94] pod "kube-apiserver-no-preload-019660" is "Ready"
I1123 08:57:19.992831 61684 pod_ready.go:86] duration metric: took 10.122708ms for pod "kube-apiserver-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:19.997939 61684 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:20.145706 61684 pod_ready.go:94] pod "kube-controller-manager-no-preload-019660" is "Ready"
I1123 08:57:20.145742 61684 pod_ready.go:86] duration metric: took 147.777309ms for pod "kube-controller-manager-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:20.355205 61684 pod_ready.go:83] waiting for pod "kube-proxy-wlb9w" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:20.756189 61684 pod_ready.go:94] pod "kube-proxy-wlb9w" is "Ready"
I1123 08:57:20.756259 61684 pod_ready.go:86] duration metric: took 400.985169ms for pod "kube-proxy-wlb9w" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:20.947647 61684 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:21.350509 61684 pod_ready.go:94] pod "kube-scheduler-no-preload-019660" is "Ready"
I1123 08:57:21.350539 61684 pod_ready.go:86] duration metric: took 402.864201ms for pod "kube-scheduler-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:21.350552 61684 pod_ready.go:40] duration metric: took 9.416731421s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:57:21.405369 61684 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1123 08:57:21.409795 61684 out.go:179] * Done! kubectl is now configured to use "no-preload-019660" cluster and "default" namespace by default
I1123 08:57:17.000521 62480 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1123 08:57:17.000560 62480 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4
I1123 08:57:17.000571 62480 cache.go:65] Caching tarball of preloaded images
I1123 08:57:17.000667 62480 preload.go:238] Found /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1123 08:57:17.000683 62480 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on docker
I1123 08:57:17.000806 62480 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/config.json ...
I1123 08:57:17.001089 62480 start.go:360] acquireMachinesLock for default-k8s-diff-port-925051: {Name:mka7dedac533b164a995f5c19cff4f68d827bd22 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1123 08:57:18.895461 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1123 08:57:18.895495 62034 machine.go:97] duration metric: took 2.351059819s to provisionDockerMachine
I1123 08:57:18.895519 62034 start.go:293] postStartSetup for "embed-certs-059363" (driver="kvm2")
I1123 08:57:18.895547 62034 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 08:57:18.895631 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 08:57:18.899037 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:18.899549 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:18.899585 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:18.899747 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:18.995822 62034 ssh_runner.go:195] Run: cat /etc/os-release
I1123 08:57:19.001215 62034 info.go:137] Remote host: Buildroot 2025.02
I1123 08:57:19.001261 62034 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
I1123 08:57:19.001335 62034 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
I1123 08:57:19.001434 62034 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
I1123 08:57:19.001551 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 08:57:19.015155 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:57:19.054248 62034 start.go:296] duration metric: took 158.692501ms for postStartSetup
I1123 08:57:19.054294 62034 fix.go:56] duration metric: took 20.246777293s for fixHost
I1123 08:57:19.058146 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.058727 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:19.058771 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.058998 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:19.059317 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:19.059336 62034 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1123 08:57:19.185896 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888239.115597688
I1123 08:57:19.185919 62034 fix.go:216] guest clock: 1763888239.115597688
I1123 08:57:19.185926 62034 fix.go:229] Guest: 2025-11-23 08:57:19.115597688 +0000 UTC Remote: 2025-11-23 08:57:19.054315183 +0000 UTC m=+20.376918396 (delta=61.282505ms)
I1123 08:57:19.185941 62034 fix.go:200] guest clock delta is within tolerance: 61.282505ms
I1123 08:57:19.185962 62034 start.go:83] releasing machines lock for "embed-certs-059363", held for 20.37844631s
I1123 08:57:19.189984 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.190596 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:19.190635 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.191288 62034 ssh_runner.go:195] Run: cat /version.json
I1123 08:57:19.191295 62034 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 08:57:19.195221 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.195642 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.195676 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:19.195699 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.195883 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:19.196195 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:19.196264 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.196563 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:19.315903 62034 ssh_runner.go:195] Run: systemctl --version
I1123 08:57:19.323178 62034 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 08:57:19.333159 62034 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 08:57:19.333365 62034 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 08:57:19.356324 62034 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1123 08:57:19.356355 62034 start.go:496] detecting cgroup driver to use...
I1123 08:57:19.356469 62034 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:57:19.385750 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1123 08:57:19.400434 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 08:57:19.414104 62034 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1123 08:57:19.414182 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1123 08:57:19.433788 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:57:19.449538 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 08:57:19.464107 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:57:19.481469 62034 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 08:57:19.496533 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 08:57:19.511385 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 08:57:19.525634 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1123 08:57:19.544298 62034 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 08:57:19.560120 62034 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1123 08:57:19.560179 62034 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1123 08:57:19.576631 62034 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 08:57:19.592833 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:19.763221 62034 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1123 08:57:19.811223 62034 start.go:496] detecting cgroup driver to use...
I1123 08:57:19.811335 62034 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1123 08:57:19.833532 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:57:19.859627 62034 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1123 08:57:19.884432 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:57:19.903805 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:57:19.921275 62034 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 08:57:19.960990 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:57:19.980317 62034 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:57:20.008661 62034 ssh_runner.go:195] Run: which cri-dockerd
I1123 08:57:20.013631 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1123 08:57:20.029302 62034 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1123 08:57:20.057103 62034 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1123 08:57:20.252891 62034 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1123 08:57:20.490326 62034 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1123 08:57:20.490458 62034 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1123 08:57:20.526773 62034 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1123 08:57:20.548985 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:20.740694 62034 ssh_runner.go:195] Run: sudo systemctl restart docker
I1123 08:57:21.481342 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 08:57:21.507341 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1123 08:57:21.530703 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:21.555618 62034 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1123 08:57:21.736442 62034 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1123 08:57:21.910308 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:22.084793 62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1123 08:57:22.133988 62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1123 08:57:22.150466 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:22.310923 62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1123 08:57:22.333687 62034 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1123 08:57:22.355809 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:22.373321 62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1123 08:57:22.392686 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:22.568456 62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1123 08:57:22.588895 62034 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1123 08:57:22.604152 62034 retry.go:31] will retry after 1.30731135s: cri-docker.service not running
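[editor note] retry.go:31 is minikube's generic retry helper; it keeps re-running the is-active check with a delay until cri-docker.service reports running. A rough stand-alone equivalent of that loop (the fixed attempt count and delay are assumptions, not minikube's real backoff logic):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// waitForService polls "systemctl is-active --quiet <unit>" until it succeeds
// or the attempts are exhausted, roughly like the retry loop in the log.
func waitForService(unit string, attempts int, delay time.Duration) error {
	for i := 0; i < attempts; i++ {
		if err := exec.Command("systemctl", "is-active", "--quiet", unit).Run(); err == nil {
			return nil
		}
		fmt.Printf("will retry after %s: %s not running\n", delay, unit)
		time.Sleep(delay)
	}
	return fmt.Errorf("%s did not become active after %d attempts", unit, attempts)
}

func main() {
	if err := waitForService("cri-docker.service", 5, 1500*time.Millisecond); err != nil {
		fmt.Println(err)
	}
}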
I1123 08:57:19.188404 62386 out.go:252] * Creating kvm2 VM (CPUs=2, Memory=3072MB, Disk=20000MB) ...
I1123 08:57:19.188687 62386 start.go:159] libmachine.API.Create for "newest-cni-078196" (driver="kvm2")
I1123 08:57:19.188735 62386 client.go:173] LocalClient.Create starting
I1123 08:57:19.188852 62386 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem
I1123 08:57:19.188919 62386 main.go:143] libmachine: Decoding PEM data...
I1123 08:57:19.188950 62386 main.go:143] libmachine: Parsing certificate...
I1123 08:57:19.189026 62386 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem
I1123 08:57:19.189059 62386 main.go:143] libmachine: Decoding PEM data...
I1123 08:57:19.189080 62386 main.go:143] libmachine: Parsing certificate...
I1123 08:57:19.189577 62386 main.go:143] libmachine: creating domain...
I1123 08:57:19.189595 62386 main.go:143] libmachine: creating network...
I1123 08:57:19.191331 62386 main.go:143] libmachine: found existing default network
I1123 08:57:19.191879 62386 main.go:143] libmachine: <network connections='3'>
<name>default</name>
<uuid>c61344c2-dba2-46dd-a21a-34776d235985</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr0' stp='on' delay='0'/>
<mac address='52:54:00:10:a2:1d'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.122.2' end='192.168.122.254'/>
</dhcp>
</ip>
</network>
I1123 08:57:19.193313 62386 network.go:206] using free private subnet 192.168.39.0/24: &{IP:192.168.39.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.39.0/24 Gateway:192.168.39.1 ClientMin:192.168.39.2 ClientMax:192.168.39.254 Broadcast:192.168.39.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e04740}
I1123 08:57:19.193434 62386 main.go:143] libmachine: defining private network:
<network>
<name>mk-newest-cni-078196</name>
<dns enable='no'/>
<ip address='192.168.39.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.39.2' end='192.168.39.253'/>
</dhcp>
</ip>
</network>
I1123 08:57:19.200866 62386 main.go:143] libmachine: creating private network mk-newest-cni-078196 192.168.39.0/24...
I1123 08:57:19.291873 62386 main.go:143] libmachine: private network mk-newest-cni-078196 192.168.39.0/24 created
I1123 08:57:19.292226 62386 main.go:143] libmachine: <network>
<name>mk-newest-cni-078196</name>
<uuid>d7bc9eb0-778c-4b77-a392-72f78dc9558b</uuid>
<bridge name='virbr1' stp='on' delay='0'/>
<mac address='52:54:00:20:cc:6a'/>
<dns enable='no'/>
<ip address='192.168.39.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.39.2' end='192.168.39.253'/>
</dhcp>
</ip>
</network>
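[editor note] minikube defines this isolated, DHCP-serving network through the libvirt API from Go. For illustration only, an equivalent network could be loaded by hand; the sketch below shells out to virsh with the XML shown above (the temporary file handling is an assumption, not minikube's approach):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// Illustration only: define and start a private network like the one in the
// log via virsh. minikube itself talks to libvirt directly, not through virsh.
const networkXML = `<network>
  <name>mk-newest-cni-078196</name>
  <dns enable='no'/>
  <ip address='192.168.39.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.39.2' end='192.168.39.253'/>
    </dhcp>
  </ip>
</network>`

func main() {
	f, err := os.CreateTemp("", "mk-net-*.xml")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString(networkXML); err != nil {
		panic(err)
	}
	f.Close()
	for _, args := range [][]string{
		{"net-define", f.Name()},
		{"net-start", "mk-newest-cni-078196"},
	} {
		out, err := exec.Command("virsh", append([]string{"-c", "qemu:///system"}, args...)...).CombinedOutput()
		fmt.Printf("virsh %v: %s", args, out)
		if err != nil {
			panic(err)
		}
	}
}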
I1123 08:57:19.292287 62386 main.go:143] libmachine: setting up store path in /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 ...
I1123 08:57:19.292318 62386 main.go:143] libmachine: building disk image from file:///home/jenkins/minikube-integration/21966-18241/.minikube/cache/iso/amd64/minikube-v1.37.0-1763503576-21924-amd64.iso
I1123 08:57:19.292332 62386 common.go:152] Making disk image using store path: /home/jenkins/minikube-integration/21966-18241/.minikube
I1123 08:57:19.292416 62386 main.go:143] libmachine: Downloading /home/jenkins/minikube-integration/21966-18241/.minikube/cache/boot2docker.iso from file:///home/jenkins/minikube-integration/21966-18241/.minikube/cache/iso/amd64/minikube-v1.37.0-1763503576-21924-amd64.iso...
I1123 08:57:19.540811 62386 common.go:159] Creating ssh key: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa...
I1123 08:57:19.628322 62386 common.go:165] Creating raw disk image: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk...
I1123 08:57:19.628370 62386 main.go:143] libmachine: Writing magic tar header
I1123 08:57:19.628409 62386 main.go:143] libmachine: Writing SSH key tar header
I1123 08:57:19.628532 62386 common.go:179] Fixing permissions on /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 ...
I1123 08:57:19.628646 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196
I1123 08:57:19.628680 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 (perms=drwx------)
I1123 08:57:19.628696 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube/machines
I1123 08:57:19.628716 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube/machines (perms=drwxr-xr-x)
I1123 08:57:19.628737 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube
I1123 08:57:19.628753 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube (perms=drwxr-xr-x)
I1123 08:57:19.628766 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241
I1123 08:57:19.628783 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241 (perms=drwxrwxr-x)
I1123 08:57:19.628796 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration
I1123 08:57:19.628812 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration (perms=drwxrwxr-x)
I1123 08:57:19.628825 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins
I1123 08:57:19.628845 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins (perms=drwxr-xr-x)
I1123 08:57:19.628862 62386 main.go:143] libmachine: checking permissions on dir: /home
I1123 08:57:19.628874 62386 main.go:143] libmachine: skipping /home - not owner
I1123 08:57:19.628886 62386 main.go:143] libmachine: defining domain...
I1123 08:57:19.630619 62386 main.go:143] libmachine: defining domain using XML:
<domain type='kvm'>
<name>newest-cni-078196</name>
<memory unit='MiB'>3072</memory>
<vcpu>2</vcpu>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough'>
</cpu>
<os>
<type>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<devices>
<disk type='file' device='cdrom'>
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' cache='default' io='threads' />
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk'/>
<target dev='hda' bus='virtio'/>
</disk>
<interface type='network'>
<source network='mk-newest-cni-078196'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<source network='default'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
</rng>
</devices>
</domain>
I1123 08:57:19.637651 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:7a:a4:6b in network default
I1123 08:57:19.638554 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:19.638580 62386 main.go:143] libmachine: starting domain...
I1123 08:57:19.638587 62386 main.go:143] libmachine: ensuring networks are active...
I1123 08:57:19.639501 62386 main.go:143] libmachine: Ensuring network default is active
I1123 08:57:19.640013 62386 main.go:143] libmachine: Ensuring network mk-newest-cni-078196 is active
I1123 08:57:19.640748 62386 main.go:143] libmachine: getting domain XML...
I1123 08:57:19.642270 62386 main.go:143] libmachine: starting domain XML:
<domain type='kvm'>
<name>newest-cni-078196</name>
<uuid>67bf4217-d2fd-4841-a93c-e1581f4c5592</uuid>
<memory unit='KiB'>3145728</memory>
<currentMemory unit='KiB'>3145728</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='2'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' io='threads'/>
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk'/>
<target dev='hda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</controller>
<interface type='network'>
<mac address='52:54:00:d7:c1:0d'/>
<source network='mk-newest-cni-078196'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:7a:a4:6b'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<audio id='1' type='none'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</rng>
</devices>
</domain>
I1123 08:57:21.239037 62386 main.go:143] libmachine: waiting for domain to start...
I1123 08:57:21.240876 62386 main.go:143] libmachine: domain is now running
I1123 08:57:21.240900 62386 main.go:143] libmachine: waiting for IP...
I1123 08:57:21.241736 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:21.242592 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:21.242611 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:21.243307 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:21.243346 62386 retry.go:31] will retry after 218.272628ms: waiting for domain to come up
I1123 08:57:21.462945 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:21.463818 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:21.463835 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:21.464322 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:21.464353 62386 retry.go:31] will retry after 354.758102ms: waiting for domain to come up
I1123 08:57:21.820932 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:21.821871 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:21.821891 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:21.822290 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:21.822322 62386 retry.go:31] will retry after 480.079581ms: waiting for domain to come up
I1123 08:57:22.304134 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:22.305030 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:22.305053 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:22.305471 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:22.305501 62386 retry.go:31] will retry after 430.762091ms: waiting for domain to come up
I1123 08:57:22.738137 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:22.739007 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:22.739022 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:22.739466 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:22.739499 62386 retry.go:31] will retry after 752.582052ms: waiting for domain to come up
I1123 08:57:23.493414 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:23.494256 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:23.494271 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:23.494669 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:23.494696 62386 retry.go:31] will retry after 765.228537ms: waiting for domain to come up
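[editor note] The "waiting for IP" retries above (they continue further down in the log) keep asking libvirt for the guest's DHCP lease, falling back to the ARP table, until the VM has booted far enough to request an address. A rough stand-alone equivalent, assuming virsh is available and reusing the MAC address from the log:

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// leaseIP scans `virsh net-dhcp-leases` output for the given MAC and returns
// the leased address, if the guest has obtained one yet.
func leaseIP(network, mac string) (string, error) {
	out, err := exec.Command("virsh", "-c", "qemu:///system", "net-dhcp-leases", network).Output()
	if err != nil {
		return "", err
	}
	for _, line := range strings.Split(string(out), "\n") {
		if !strings.Contains(line, mac) {
			continue
		}
		for _, field := range strings.Fields(line) {
			if strings.Contains(field, "/") { // lease column looks like 192.168.39.12/24
				return strings.SplitN(field, "/", 2)[0], nil
			}
		}
	}
	return "", fmt.Errorf("no lease for %s yet", mac)
}

func main() {
	for i := 0; i < 30; i++ {
		if ip, err := leaseIP("mk-newest-cni-078196", "52:54:00:d7:c1:0d"); err == nil {
			fmt.Println("domain IP:", ip)
			return
		}
		time.Sleep(2 * time.Second)
	}
	fmt.Println("domain never obtained an IP")
}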
I1123 08:57:23.912604 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:23.930659 62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1123 08:57:23.946465 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:24.099133 62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1123 08:57:24.217974 62034 retry.go:31] will retry after 1.350292483s: cri-docker.service not running
I1123 08:57:25.569520 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:25.588082 62034 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1123 08:57:25.588166 62034 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1123 08:57:25.595521 62034 start.go:564] Will wait 60s for crictl version
I1123 08:57:25.595597 62034 ssh_runner.go:195] Run: which crictl
I1123 08:57:25.600903 62034 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1123 08:57:25.642159 62034 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1123 08:57:25.642260 62034 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:57:25.678324 62034 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:57:25.708968 62034 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1123 08:57:25.712357 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:25.712811 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:25.712861 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:25.713088 62034 ssh_runner.go:195] Run: grep 192.168.72.1 host.minikube.internal$ /etc/hosts
I1123 08:57:25.718506 62034 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.72.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:57:25.737282 62034 kubeadm.go:884] updating cluster {Name:embed-certs-059363 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1123 08:57:25.737446 62034 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1123 08:57:25.737523 62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:57:25.759347 62034 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1123 08:57:25.759372 62034 docker.go:621] Images already preloaded, skipping extraction
I1123 08:57:25.759440 62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:57:25.784761 62034 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1123 08:57:25.784786 62034 cache_images.go:86] Images are preloaded, skipping loading
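[editor note] Extraction of the preload tarball is skipped here because every image required for v1.34.1 already appears in the docker images output. A minimal sketch of that kind of presence check (the required list below is a subset picked for brevity, not minikube's full manifest):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Same command the log runs over SSH, executed locally for illustration.
	out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
	if err != nil {
		panic(err)
	}
	have := map[string]bool{}
	for _, img := range strings.Fields(string(out)) {
		have[img] = true
	}
	required := []string{
		"registry.k8s.io/kube-apiserver:v1.34.1",
		"registry.k8s.io/etcd:3.6.4-0",
		"registry.k8s.io/coredns/coredns:v1.12.1",
	}
	for _, img := range required {
		fmt.Printf("%-45s preloaded=%v\n", img, have[img])
	}
}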
I1123 08:57:25.784796 62034 kubeadm.go:935] updating node { 192.168.72.170 8443 v1.34.1 docker true true} ...
I1123 08:57:25.784906 62034 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=embed-certs-059363 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.72.170
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1123 08:57:25.784959 62034 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1123 08:57:25.840443 62034 cni.go:84] Creating CNI manager for ""
I1123 08:57:25.840484 62034 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:57:25.840500 62034 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1123 08:57:25.840520 62034 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.72.170 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:embed-certs-059363 NodeName:embed-certs-059363 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.72.170"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.72.170 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1123 08:57:25.840651 62034 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.72.170
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "embed-certs-059363"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.72.170"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.72.170"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
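[editor note] The kubeadm config printed above is written to /var/tmp/minikube/kubeadm.yaml.new as a four-document YAML stream (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration). A quick way to sanity-check such a stream, assuming gopkg.in/yaml.v3 is available; this is a sketch, not minikube's own parsing code:

package main

import (
	"fmt"
	"io"
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	f, err := os.Open("/var/tmp/minikube/kubeadm.yaml") // path taken from the log
	if err != nil {
		panic(err)
	}
	defer f.Close()
	dec := yaml.NewDecoder(f)
	for {
		var doc struct {
			APIVersion string `yaml:"apiVersion"`
			Kind       string `yaml:"kind"`
		}
		if err := dec.Decode(&doc); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%-25s %s\n", doc.Kind, doc.APIVersion)
	}
}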
I1123 08:57:25.840731 62034 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1123 08:57:25.855481 62034 binaries.go:51] Found k8s binaries, skipping transfer
I1123 08:57:25.855562 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1123 08:57:25.869149 62034 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (319 bytes)
I1123 08:57:25.890030 62034 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1123 08:57:25.913602 62034 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2225 bytes)
I1123 08:57:25.939399 62034 ssh_runner.go:195] Run: grep 192.168.72.170 control-plane.minikube.internal$ /etc/hosts
I1123 08:57:25.944187 62034 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.72.170 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:57:25.959980 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:26.112182 62034 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:57:26.150488 62034 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363 for IP: 192.168.72.170
I1123 08:57:26.150514 62034 certs.go:195] generating shared ca certs ...
I1123 08:57:26.150535 62034 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:26.150704 62034 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
I1123 08:57:26.150759 62034 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
I1123 08:57:26.150773 62034 certs.go:257] generating profile certs ...
I1123 08:57:26.150910 62034 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/client.key
I1123 08:57:26.151011 62034 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.key.4b3bdd21
I1123 08:57:26.151069 62034 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.key
I1123 08:57:26.151216 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
W1123 08:57:26.151290 62034 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
I1123 08:57:26.151305 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
I1123 08:57:26.151344 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
I1123 08:57:26.151380 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
I1123 08:57:26.151415 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
I1123 08:57:26.151483 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:57:26.152356 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1123 08:57:26.201568 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1123 08:57:26.246367 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1123 08:57:26.299610 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1123 08:57:26.334177 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1428 bytes)
I1123 08:57:26.372484 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1123 08:57:26.408684 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1123 08:57:26.449833 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1123 08:57:26.493006 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /usr/share/ca-certificates/221482.pem (1708 bytes)
I1123 08:57:26.527341 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1123 08:57:26.564892 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem --> /usr/share/ca-certificates/22148.pem (1338 bytes)
I1123 08:57:26.601408 62034 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1123 08:57:26.626296 62034 ssh_runner.go:195] Run: openssl version
I1123 08:57:26.634385 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/221482.pem && ln -fs /usr/share/ca-certificates/221482.pem /etc/ssl/certs/221482.pem"
I1123 08:57:26.650265 62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/221482.pem
I1123 08:57:26.657578 62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:02 /usr/share/ca-certificates/221482.pem
I1123 08:57:26.657632 62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/221482.pem
I1123 08:57:26.666331 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/221482.pem /etc/ssl/certs/3ec20f2e.0"
I1123 08:57:26.682746 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1123 08:57:26.697978 62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:26.704544 62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 07:56 /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:26.704612 62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:26.714575 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1123 08:57:26.730139 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/22148.pem && ln -fs /usr/share/ca-certificates/22148.pem /etc/ssl/certs/22148.pem"
I1123 08:57:26.745401 62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22148.pem
I1123 08:57:26.751383 62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:02 /usr/share/ca-certificates/22148.pem
I1123 08:57:26.751450 62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22148.pem
I1123 08:57:26.760273 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/22148.pem /etc/ssl/certs/51391683.0"
I1123 08:57:26.775477 62034 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1123 08:57:26.782298 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1123 08:57:26.790966 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1123 08:57:26.800082 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1123 08:57:26.809033 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1123 08:57:26.818403 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1123 08:57:26.827424 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
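[editor note] Each openssl x509 -checkend 86400 call above asks whether the certificate expires within the next 24 hours. The same check expressed with Go's standard library (the path is one of those in the log; a sketch, not minikube's implementation):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

// expiresWithin reports whether the PEM certificate at path expires within d,
// mirroring `openssl x509 -checkend <seconds>`.
func expiresWithin(path string, d time.Duration) (bool, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	block, _ := pem.Decode(data)
	if block == nil {
		return false, fmt.Errorf("%s: no PEM block found", path)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false, err
	}
	return time.Now().Add(d).After(cert.NotAfter), nil
}

func main() {
	soon, err := expiresWithin("/var/lib/minikube/certs/apiserver-kubelet-client.crt", 24*time.Hour)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("expires within 24h:", soon)
}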
I1123 08:57:26.836600 62034 kubeadm.go:401] StartCluster: {Name:embed-certs-059363 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:57:26.836750 62034 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1123 08:57:26.857858 62034 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1123 08:57:26.872778 62034 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1123 08:57:26.872804 62034 kubeadm.go:598] restartPrimaryControlPlane start ...
I1123 08:57:26.872861 62034 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1123 08:57:26.887408 62034 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1123 08:57:26.888007 62034 kubeconfig.go:47] verify endpoint returned: get endpoint: "embed-certs-059363" does not appear in /home/jenkins/minikube-integration/21966-18241/kubeconfig
I1123 08:57:26.888341 62034 kubeconfig.go:62] /home/jenkins/minikube-integration/21966-18241/kubeconfig needs updating (will repair): [kubeconfig missing "embed-certs-059363" cluster setting kubeconfig missing "embed-certs-059363" context setting]
I1123 08:57:26.888835 62034 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/kubeconfig: {Name:mk4ff9c09d937b27d93688a0eb9fbee2087daab0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:26.917419 62034 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1123 08:57:26.931495 62034 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.72.170
I1123 08:57:26.931533 62034 kubeadm.go:1161] stopping kube-system containers ...
I1123 08:57:26.931598 62034 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1123 08:57:26.956424 62034 docker.go:484] Stopping containers: [2777bc8dc9d8 1e7fd2e1de3d 766f92e6b85c a4e7b815df08 b78206bd7ac1 246623b92954 0f2d7243cca6 5dc3731f3932 12f2dd5a9262 45882ff88b2f 8437f8a92375 866aa8687d31 230241a2edf7 1c8b359647bb 038fcdc4f7f6 049872fe8a58]
I1123 08:57:26.956515 62034 ssh_runner.go:195] Run: docker stop 2777bc8dc9d8 1e7fd2e1de3d 766f92e6b85c a4e7b815df08 b78206bd7ac1 246623b92954 0f2d7243cca6 5dc3731f3932 12f2dd5a9262 45882ff88b2f 8437f8a92375 866aa8687d31 230241a2edf7 1c8b359647bb 038fcdc4f7f6 049872fe8a58
I1123 08:57:26.982476 62034 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1123 08:57:27.015459 62034 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1123 08:57:27.030576 62034 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1123 08:57:27.030600 62034 kubeadm.go:158] found existing configuration files:
I1123 08:57:27.030658 62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1123 08:57:27.043658 62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1123 08:57:27.043723 62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1123 08:57:27.058167 62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1123 08:57:27.074375 62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1123 08:57:27.074449 62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1123 08:57:27.091119 62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1123 08:57:27.106772 62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1123 08:57:27.106876 62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1123 08:57:27.124425 62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1123 08:57:27.140001 62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1123 08:57:27.140061 62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1123 08:57:27.154930 62034 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1123 08:57:27.169444 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:27.328883 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:24.261134 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:24.261787 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:24.261806 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:24.262181 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:24.262219 62386 retry.go:31] will retry after 1.137472458s: waiting for domain to come up
I1123 08:57:25.401597 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:25.402373 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:25.402395 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:25.402716 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:25.402745 62386 retry.go:31] will retry after 1.246843188s: waiting for domain to come up
I1123 08:57:26.651383 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:26.652402 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:26.652423 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:26.652983 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:26.653027 62386 retry.go:31] will retry after 1.576847177s: waiting for domain to come up
I1123 08:57:28.231063 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:28.231892 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:28.231907 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:28.232342 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:28.232376 62386 retry.go:31] will retry after 2.191968701s: waiting for domain to come up
I1123 08:57:29.072122 62034 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.743194687s)
I1123 08:57:29.072199 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:29.363322 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:29.437121 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:29.519180 62034 api_server.go:52] waiting for apiserver process to appear ...
I1123 08:57:29.519372 62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:57:30.019409 62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:57:30.519973 62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:57:31.019428 62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:57:31.127420 62034 api_server.go:72] duration metric: took 1.608256805s to wait for apiserver process to appear ...
I1123 08:57:31.127455 62034 api_server.go:88] waiting for apiserver healthz status ...
I1123 08:57:31.127480 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:31.128203 62034 api_server.go:269] stopped: https://192.168.72.170:8443/healthz: Get "https://192.168.72.170:8443/healthz": dial tcp 192.168.72.170:8443: connect: connection refused
I1123 08:57:31.627812 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:30.426848 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:30.427811 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:30.427838 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:30.428254 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:30.428293 62386 retry.go:31] will retry after 2.66246372s: waiting for domain to come up
I1123 08:57:33.093605 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:33.094467 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:33.094487 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:33.095017 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:33.095058 62386 retry.go:31] will retry after 2.368738453s: waiting for domain to come up
I1123 08:57:34.364730 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W1123 08:57:34.364762 62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I1123 08:57:34.364778 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:34.401309 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W1123 08:57:34.401349 62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I1123 08:57:34.627677 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:34.639017 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
[+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1123 08:57:34.639052 62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
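[editor note] The 403 responses are normal while the restarted apiserver is still bootstrapping RBAC: the unauthenticated probe is rejected, then /healthz flips to 500 with the per-check breakdown shown above, and eventually to 200 once the remaining poststarthooks finish. A sketch of such a polling loop; certificate verification is skipped here purely to keep the example short, which is an assumption rather than what minikube actually does:

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second,
		// TLS verification disabled only to keep this sketch self-contained.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	url := "https://192.168.72.170:8443/healthz"
	for i := 0; i < 60; i++ {
		resp, err := client.Get(url)
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			fmt.Printf("%s -> %d\n", url, resp.StatusCode)
			if resp.StatusCode == http.StatusOK {
				return
			}
			fmt.Println(string(body)) // per-check breakdown on 403/500
		}
		time.Sleep(500 * time.Millisecond)
	}
}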
I1123 08:57:35.127669 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:35.133471 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1123 08:57:35.133500 62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1123 08:57:35.628190 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:35.637607 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1123 08:57:35.637636 62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1123 08:57:36.128401 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:36.134007 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 200:
ok
I1123 08:57:36.142338 62034 api_server.go:141] control plane version: v1.34.1
I1123 08:57:36.142374 62034 api_server.go:131] duration metric: took 5.014912025s to wait for apiserver health ...
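(Annotation: the repeated 500 responses above, followed by the 200 at 08:57:36, show minikube polling the apiserver's /healthz endpoint roughly every 500ms until it reports healthy. A minimal sketch of that kind of poll loop, assuming an illustrative endpoint and timeout rather than minikube's actual implementation:)

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// waitForHealthz polls a /healthz URL until it returns HTTP 200 or the timeout
// elapses. TLS verification is skipped only because this sketch's host does not
// trust the apiserver certificate.
func waitForHealthz(url string, timeout time.Duration) error {
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil // healthz check passed
			}
		}
		time.Sleep(500 * time.Millisecond) // roughly the cadence visible in the log
	}
	return fmt.Errorf("apiserver did not become healthy within %s", timeout)
}

func main() {
	if err := waitForHealthz("https://192.168.72.170:8443/healthz", 5*time.Minute); err != nil {
		fmt.Println(err)
	}
}
```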
I1123 08:57:36.142383 62034 cni.go:84] Creating CNI manager for ""
I1123 08:57:36.142394 62034 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:57:36.144644 62034 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1123 08:57:36.146156 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1123 08:57:36.172405 62034 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
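(Annotation: the line above copies a 496-byte bridge CNI config to /etc/cni/net.d/1-k8s.conflist. The exact file contents are not shown in the log; the sketch below writes a plausible minimal bridge conflist using the standard "bridge", "host-local" and "portmap" reference plugins, with an assumed pod subnet:)

```go
package main

import "os"

// bridgeConflist is an illustrative bridge CNI config of the kind copied to
// /etc/cni/net.d/1-k8s.conflist above. The subnet and option values are
// assumptions for this sketch, not the bytes minikube actually wrote.
const bridgeConflist = `{
  "cniVersion": "0.3.1",
  "name": "bridge",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "bridge",
      "isDefaultGateway": true,
      "ipMasq": true,
      "hairpinMode": true,
      "ipam": { "type": "host-local", "subnet": "10.244.0.0/16" }
    },
    { "type": "portmap", "capabilities": { "portMappings": true } }
  ]
}
`

func main() {
	// Written locally just to show the shape of the config file.
	_ = os.WriteFile("1-k8s.conflist", []byte(bridgeConflist), 0o644)
}
```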
I1123 08:57:36.206117 62034 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 08:57:36.212151 62034 system_pods.go:59] 8 kube-system pods found
I1123 08:57:36.212192 62034 system_pods.go:61] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:57:36.212201 62034 system_pods.go:61] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1123 08:57:36.212209 62034 system_pods.go:61] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1123 08:57:36.212215 62034 system_pods.go:61] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1123 08:57:36.212219 62034 system_pods.go:61] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
I1123 08:57:36.212227 62034 system_pods.go:61] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1123 08:57:36.212254 62034 system_pods.go:61] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1123 08:57:36.212263 62034 system_pods.go:61] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
I1123 08:57:36.212272 62034 system_pods.go:74] duration metric: took 6.125497ms to wait for pod list to return data ...
I1123 08:57:36.212281 62034 node_conditions.go:102] verifying NodePressure condition ...
I1123 08:57:36.216399 62034 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1123 08:57:36.216437 62034 node_conditions.go:123] node cpu capacity is 2
I1123 08:57:36.216455 62034 node_conditions.go:105] duration metric: took 4.163261ms to run NodePressure ...
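(Annotation: the system_pods and node_conditions lines above come from listing kube-system pods and node status through the API. A small client-go sketch that performs the same kind of pod listing; the kubeconfig path is an assumption for illustration:)

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path is illustrative; the log shows this run using
	// /home/jenkins/minikube-integration/21966-18241/kubeconfig.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pods, err := cs.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		// Prints name and phase, similar to the "8 kube-system pods found" listing above.
		fmt.Printf("%s\t%s\n", p.Name, p.Status.Phase)
	}
}
```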
I1123 08:57:36.216523 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:36.499954 62034 kubeadm.go:729] waiting for restarted kubelet to initialise ...
I1123 08:57:36.504225 62034 kubeadm.go:744] kubelet initialised
I1123 08:57:36.504271 62034 kubeadm.go:745] duration metric: took 4.279186ms waiting for restarted kubelet to initialise ...
I1123 08:57:36.504293 62034 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1123 08:57:36.525819 62034 ops.go:34] apiserver oom_adj: -16
I1123 08:57:36.525847 62034 kubeadm.go:602] duration metric: took 9.653035112s to restartPrimaryControlPlane
I1123 08:57:36.525859 62034 kubeadm.go:403] duration metric: took 9.689268169s to StartCluster
I1123 08:57:36.525879 62034 settings.go:142] acquiring lock: {Name:mk0efabf238cb985c892ac3a9b32ac206b9f2336 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:36.525969 62034 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21966-18241/kubeconfig
I1123 08:57:36.527038 62034 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/kubeconfig: {Name:mk4ff9c09d937b27d93688a0eb9fbee2087daab0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:36.527368 62034 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1123 08:57:36.527458 62034 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1123 08:57:36.527579 62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:57:36.527600 62034 addons.go:70] Setting metrics-server=true in profile "embed-certs-059363"
I1123 08:57:36.527599 62034 addons.go:70] Setting default-storageclass=true in profile "embed-certs-059363"
I1123 08:57:36.527579 62034 addons.go:70] Setting storage-provisioner=true in profile "embed-certs-059363"
I1123 08:57:36.527644 62034 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "embed-certs-059363"
I1123 08:57:36.527635 62034 addons.go:70] Setting dashboard=true in profile "embed-certs-059363"
I1123 08:57:36.527665 62034 addons.go:239] Setting addon dashboard=true in "embed-certs-059363"
W1123 08:57:36.527679 62034 addons.go:248] addon dashboard should already be in state true
I1123 08:57:36.527666 62034 cache.go:107] acquiring lock: {Name:mk5578ff0020d8c222414769e0c7ca17014d52f1 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:57:36.527671 62034 addons.go:239] Setting addon storage-provisioner=true in "embed-certs-059363"
W1123 08:57:36.527702 62034 addons.go:248] addon storage-provisioner should already be in state true
I1123 08:57:36.527733 62034 cache.go:115] /home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 exists
I1123 08:57:36.527637 62034 addons.go:239] Setting addon metrics-server=true in "embed-certs-059363"
I1123 08:57:36.527748 62034 cache.go:96] cache image "gcr.io/k8s-minikube/gvisor-addon:2" -> "/home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2" took 96.823µs
I1123 08:57:36.527758 62034 cache.go:80] save to tar file gcr.io/k8s-minikube/gvisor-addon:2 -> /home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 succeeded
I1123 08:57:36.527763 62034 host.go:66] Checking if "embed-certs-059363" exists ...
I1123 08:57:36.527766 62034 cache.go:87] Successfully saved all images to host disk.
W1123 08:57:36.527758 62034 addons.go:248] addon metrics-server should already be in state true
I1123 08:57:36.527796 62034 host.go:66] Checking if "embed-certs-059363" exists ...
I1123 08:57:36.527738 62034 host.go:66] Checking if "embed-certs-059363" exists ...
I1123 08:57:36.527934 62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:57:36.529271 62034 out.go:179] * Verifying Kubernetes components...
I1123 08:57:36.530935 62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:57:36.531022 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:36.532294 62034 addons.go:239] Setting addon default-storageclass=true in "embed-certs-059363"
W1123 08:57:36.532326 62034 addons.go:248] addon default-storageclass should already be in state true
I1123 08:57:36.532348 62034 host.go:66] Checking if "embed-certs-059363" exists ...
I1123 08:57:36.533191 62034 out.go:179] - Using image registry.k8s.io/echoserver:1.4
I1123 08:57:36.533215 62034 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:57:36.533195 62034 out.go:179] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I1123 08:57:36.534073 62034 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1123 08:57:36.534091 62034 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1123 08:57:36.534667 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.535129 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:36.535347 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.535858 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:36.536061 62034 addons.go:436] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1123 08:57:36.536084 62034 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1123 08:57:36.536132 62034 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:57:36.536145 62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1123 08:57:36.536880 62034 out.go:179] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I1123 08:57:36.537788 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.538214 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-ns.yaml
I1123 08:57:36.538249 62034 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I1123 08:57:36.538746 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:36.538816 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.539088 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:36.540090 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.540146 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.541026 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:36.541069 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.541120 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:36.541158 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.541257 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:36.541514 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:36.542423 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.542896 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:36.542931 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.543116 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:36.844170 62034 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:57:36.869742 62034 node_ready.go:35] waiting up to 6m0s for node "embed-certs-059363" to be "Ready" ...
I1123 08:57:36.960323 62034 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1123 08:57:36.960371 62034 cache_images.go:86] Images are preloaded, skipping loading
I1123 08:57:36.960379 62034 cache_images.go:264] succeeded pushing to: embed-certs-059363
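(Annotation: the block above runs `docker images --format {{.Repository}}:{{.Tag}}` on the guest and concludes "Images are preloaded, skipping loading" because every required image is already present. A sketch of that check, with the expected list trimmed to a few entries copied from the -- stdout -- block:)

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// imagesPreloaded runs the same docker images command seen in the log and
// reports whether every expected image tag is already present locally.
func imagesPreloaded(expected []string) (bool, error) {
	out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
	if err != nil {
		return false, err
	}
	have := map[string]bool{}
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		have[line] = true
	}
	for _, img := range expected {
		if !have[img] {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	ok, err := imagesPreloaded([]string{
		"registry.k8s.io/kube-apiserver:v1.34.1",
		"registry.k8s.io/etcd:3.6.4-0",
		"registry.k8s.io/coredns/coredns:v1.12.1",
	})
	fmt.Println(ok, err)
}
```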
I1123 08:57:37.000609 62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1123 08:57:37.008492 62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:57:37.017692 62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1123 08:57:37.017713 62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I1123 08:57:37.020529 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I1123 08:57:37.020561 62034 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I1123 08:57:37.074670 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I1123 08:57:37.074710 62034 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I1123 08:57:37.076076 62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1123 08:57:37.076096 62034 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1123 08:57:37.132446 62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1123 08:57:37.132466 62034 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1123 08:57:37.134322 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I1123 08:57:37.134339 62034 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I1123 08:57:37.188291 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-dp.yaml
I1123 08:57:37.188311 62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
I1123 08:57:37.200924 62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1123 08:57:37.265084 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-role.yaml
I1123 08:57:37.265109 62034 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I1123 08:57:37.341532 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I1123 08:57:37.341559 62034 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I1123 08:57:37.425079 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-sa.yaml
I1123 08:57:37.425110 62034 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I1123 08:57:37.510704 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-secret.yaml
I1123 08:57:37.510748 62034 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I1123 08:57:37.600957 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-svc.yaml
I1123 08:57:37.600982 62034 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I1123 08:57:37.663098 62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1123 08:57:38.728547 62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.720019273s)
I1123 08:57:38.824306 62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (1.623332944s)
I1123 08:57:38.824374 62034 addons.go:495] Verifying addon metrics-server=true in "embed-certs-059363"
W1123 08:57:38.886375 62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
I1123 08:57:39.122207 62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (1.459038888s)
I1123 08:57:39.124248 62034 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p embed-certs-059363 addons enable metrics-server
I1123 08:57:39.126125 62034 out.go:179] * Enabled addons: default-storageclass, storage-provisioner, metrics-server, dashboard
I1123 08:57:35.465742 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:35.466525 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:35.466540 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:35.467003 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:35.467033 62386 retry.go:31] will retry after 4.454598391s: waiting for domain to come up
I1123 08:57:42.467134 62480 start.go:364] duration metric: took 25.46601127s to acquireMachinesLock for "default-k8s-diff-port-925051"
I1123 08:57:42.467190 62480 start.go:96] Skipping create...Using existing machine configuration
I1123 08:57:42.467196 62480 fix.go:54] fixHost starting:
I1123 08:57:42.469900 62480 fix.go:112] recreateIfNeeded on default-k8s-diff-port-925051: state=Stopped err=<nil>
W1123 08:57:42.469946 62480 fix.go:138] unexpected machine state, will restart: <nil>
I1123 08:57:39.127521 62034 addons.go:530] duration metric: took 2.600069679s for enable addons: enabled=[default-storageclass storage-provisioner metrics-server dashboard]
W1123 08:57:41.375432 62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
I1123 08:57:39.922903 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:39.923713 62386 main.go:143] libmachine: domain newest-cni-078196 has current primary IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:39.923726 62386 main.go:143] libmachine: found domain IP: 192.168.39.87
I1123 08:57:39.923732 62386 main.go:143] libmachine: reserving static IP address...
I1123 08:57:39.924129 62386 main.go:143] libmachine: unable to find host DHCP lease matching {name: "newest-cni-078196", mac: "52:54:00:d7:c1:0d", ip: "192.168.39.87"} in network mk-newest-cni-078196
I1123 08:57:40.154544 62386 main.go:143] libmachine: reserved static IP address 192.168.39.87 for domain newest-cni-078196
I1123 08:57:40.154569 62386 main.go:143] libmachine: waiting for SSH...
I1123 08:57:40.154577 62386 main.go:143] libmachine: Getting to WaitForSSH function...
I1123 08:57:40.157877 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.158255 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:minikube Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.158277 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.158452 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:40.158677 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:40.158690 62386 main.go:143] libmachine: About to run SSH command:
exit 0
I1123 08:57:40.266068 62386 main.go:143] libmachine: SSH cmd err, output: <nil>:
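(Annotation: the WaitForSSH step above probes the new VM by opening an SSH session and running `exit 0` until it succeeds. A sketch of that probe using golang.org/x/crypto/ssh; the address, user and key path are placeholders mirroring the values printed in the log, not minikube's internal code:)

```go
package main

import (
	"fmt"
	"os"
	"time"

	"golang.org/x/crypto/ssh"
)

// waitForSSH retries an SSH session running `exit 0` until it succeeds or the
// deadline passes, the same probe visible in the log above.
func waitForSSH(addr, user, keyPath string, timeout time.Duration) error {
	key, err := os.ReadFile(keyPath)
	if err != nil {
		return err
	}
	signer, err := ssh.ParsePrivateKey(key)
	if err != nil {
		return err
	}
	cfg := &ssh.ClientConfig{
		User:            user,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable for a freshly created local VM
		Timeout:         5 * time.Second,
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if client, err := ssh.Dial("tcp", addr, cfg); err == nil {
			sess, serr := client.NewSession()
			if serr == nil {
				serr = sess.Run("exit 0")
				sess.Close()
			}
			client.Close()
			if serr == nil {
				return nil
			}
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("ssh did not come up within %s", timeout)
}

func main() {
	fmt.Println(waitForSSH("192.168.39.87:22", "docker", "id_rsa", 2*time.Minute))
}
```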
I1123 08:57:40.266484 62386 main.go:143] libmachine: domain creation complete
I1123 08:57:40.268135 62386 machine.go:94] provisionDockerMachine start ...
I1123 08:57:40.270701 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.271083 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.271106 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.271243 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:40.271436 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:40.271446 62386 main.go:143] libmachine: About to run SSH command:
hostname
I1123 08:57:40.377718 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1123 08:57:40.377749 62386 buildroot.go:166] provisioning hostname "newest-cni-078196"
I1123 08:57:40.381682 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.382224 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.382274 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.382549 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:40.382750 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:40.382763 62386 main.go:143] libmachine: About to run SSH command:
sudo hostname newest-cni-078196 && echo "newest-cni-078196" | sudo tee /etc/hostname
I1123 08:57:40.510920 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: newest-cni-078196
I1123 08:57:40.514470 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.514870 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.514901 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.515119 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:40.515349 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:40.515373 62386 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\snewest-cni-078196' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 newest-cni-078196/g' /etc/hosts;
else
echo '127.0.1.1 newest-cni-078196' | sudo tee -a /etc/hosts;
fi
fi
I1123 08:57:40.644008 62386 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:57:40.644045 62386 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
I1123 08:57:40.644119 62386 buildroot.go:174] setting up certificates
I1123 08:57:40.644132 62386 provision.go:84] configureAuth start
I1123 08:57:40.647940 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.648462 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.648495 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.651488 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.651967 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.652002 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.652153 62386 provision.go:143] copyHostCerts
I1123 08:57:40.652210 62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
I1123 08:57:40.652252 62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
I1123 08:57:40.652340 62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
I1123 08:57:40.652511 62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
I1123 08:57:40.652528 62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
I1123 08:57:40.652580 62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
I1123 08:57:40.652714 62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
I1123 08:57:40.652735 62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
I1123 08:57:40.652778 62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
I1123 08:57:40.652872 62386 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.newest-cni-078196 san=[127.0.0.1 192.168.39.87 localhost minikube newest-cni-078196]
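(Annotation: the line above generates a server certificate whose SANs are 127.0.0.1, 192.168.39.87, localhost, minikube and newest-cni-078196. A crypto/x509 sketch that issues a certificate with those SANs; it is self-signed here for brevity, whereas minikube signs it with its CA key:)

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Sketch only: errors are ignored and the cert is self-signed.
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.newest-cni-078196"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(3 * 365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// SANs copied from the san=[...] list in the log line above.
		DNSNames:    []string{"localhost", "minikube", "newest-cni-078196"},
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.39.87")},
	}
	der, _ := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}
```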
I1123 08:57:40.723606 62386 provision.go:177] copyRemoteCerts
I1123 08:57:40.723663 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 08:57:40.726615 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.727086 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.727115 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.727301 62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
I1123 08:57:40.819420 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1123 08:57:40.852505 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1123 08:57:40.888555 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1123 08:57:40.923977 62386 provision.go:87] duration metric: took 279.828188ms to configureAuth
I1123 08:57:40.924014 62386 buildroot.go:189] setting minikube options for container-runtime
I1123 08:57:40.924275 62386 config.go:182] Loaded profile config "newest-cni-078196": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:57:40.927517 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.927915 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.927938 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.928098 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:40.928391 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:40.928404 62386 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1123 08:57:41.042673 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1123 08:57:41.042707 62386 buildroot.go:70] root file system type: tmpfs
I1123 08:57:41.042873 62386 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1123 08:57:41.046445 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:41.046989 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:41.047094 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:41.047391 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:41.047683 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:41.047769 62386 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1123 08:57:41.175224 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1123 08:57:41.178183 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:41.178676 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:41.178702 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:41.178902 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:41.179152 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:41.179171 62386 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1123 08:57:42.186295 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1123 08:57:42.186331 62386 machine.go:97] duration metric: took 1.918179804s to provisionDockerMachine
I1123 08:57:42.186347 62386 client.go:176] duration metric: took 22.997600307s to LocalClient.Create
I1123 08:57:42.186371 62386 start.go:167] duration metric: took 22.997685492s to libmachine.API.Create "newest-cni-078196"
I1123 08:57:42.186382 62386 start.go:293] postStartSetup for "newest-cni-078196" (driver="kvm2")
I1123 08:57:42.186396 62386 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 08:57:42.186475 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 08:57:42.189917 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.190351 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.190388 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.190560 62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
I1123 08:57:42.283393 62386 ssh_runner.go:195] Run: cat /etc/os-release
I1123 08:57:42.289999 62386 info.go:137] Remote host: Buildroot 2025.02
I1123 08:57:42.290030 62386 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
I1123 08:57:42.290117 62386 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
I1123 08:57:42.290218 62386 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
I1123 08:57:42.290354 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 08:57:42.306924 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:57:42.343081 62386 start.go:296] duration metric: took 156.683452ms for postStartSetup
I1123 08:57:42.347012 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.347579 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.347619 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.347939 62386 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/config.json ...
I1123 08:57:42.348140 62386 start.go:128] duration metric: took 23.161911818s to createHost
I1123 08:57:42.350835 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.351301 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.351336 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.351513 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:42.351791 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:42.351806 62386 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1123 08:57:42.466967 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888262.440217357
I1123 08:57:42.466993 62386 fix.go:216] guest clock: 1763888262.440217357
I1123 08:57:42.467001 62386 fix.go:229] Guest: 2025-11-23 08:57:42.440217357 +0000 UTC Remote: 2025-11-23 08:57:42.348151583 +0000 UTC m=+33.279616417 (delta=92.065774ms)
I1123 08:57:42.467025 62386 fix.go:200] guest clock delta is within tolerance: 92.065774ms
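(Annotation: the guest clock lines above come from running `date +%s.%N` over SSH, parsing the seconds.nanoseconds stamp, and checking that the guest/host delta is inside a tolerance. A sketch of the parse-and-compare step; the 2-second tolerance is an assumption for illustration:)

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"time"
)

// checkClockDelta parses a `date +%s.%N` stamp from the guest, compares it to
// the host clock, and reports whether the delta is within tolerance.
func checkClockDelta(guestStamp string, tolerance time.Duration) (time.Duration, bool, error) {
	secs, err := strconv.ParseFloat(guestStamp, 64)
	if err != nil {
		return 0, false, err
	}
	guest := time.Unix(0, int64(secs*float64(time.Second)))
	delta := time.Since(guest)
	return delta, math.Abs(float64(delta)) <= float64(tolerance), nil
}

func main() {
	// Guest stamp copied from the log line above; tolerance is illustrative.
	delta, ok, err := checkClockDelta("1763888262.440217357", 2*time.Second)
	fmt.Println(delta, ok, err)
}
```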
I1123 08:57:42.467033 62386 start.go:83] releasing machines lock for "newest-cni-078196", held for 23.280957089s
I1123 08:57:42.471032 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.471501 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.471531 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.472456 62386 ssh_runner.go:195] Run: cat /version.json
I1123 08:57:42.472536 62386 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 08:57:42.477011 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.477058 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.477612 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.477644 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.479664 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.479706 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.480287 62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
I1123 08:57:42.480869 62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
I1123 08:57:42.593772 62386 ssh_runner.go:195] Run: systemctl --version
I1123 08:57:42.603410 62386 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 08:57:42.614510 62386 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 08:57:42.614601 62386 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 08:57:42.645967 62386 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1123 08:57:42.646003 62386 start.go:496] detecting cgroup driver to use...
I1123 08:57:42.646138 62386 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:57:42.678706 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1123 08:57:42.694705 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 08:57:42.713341 62386 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1123 08:57:42.713419 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1123 08:57:42.729085 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:57:42.747983 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 08:57:42.768036 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:57:42.784061 62386 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 08:57:42.803711 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 08:57:42.822385 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 08:57:42.837748 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1123 08:57:42.858942 62386 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 08:57:42.873841 62386 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1123 08:57:42.873924 62386 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1123 08:57:42.888503 62386 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 08:57:42.902894 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:43.087215 62386 ssh_runner.go:195] Run: sudo systemctl restart containerd
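The block above rewrites /etc/containerd/config.toml with sed over SSH (cgroupfs driver, runc v2, pause image, CNI conf dir) and then restarts containerd. A minimal Go sketch of that pattern, assuming a reachable guest and an unencrypted key at a hypothetical path (this is not minikube's actual helper):

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	key, err := os.ReadFile("/home/jenkins/.minikube/machines/demo/id_rsa") // hypothetical key path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(key)
	if err != nil {
		log.Fatal(err)
	}
	client, err := ssh.Dial("tcp", "192.168.39.87:22", &ssh.ClientConfig{
		User:            "docker",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // throwaway test VM, as in the log
	})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// A subset of the edits shown in the log, run one session per command.
	cmds := []string{
		`sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml`,
		`sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml`,
		`sudo systemctl daemon-reload`,
		`sudo systemctl restart containerd`,
	}
	for _, c := range cmds {
		sess, err := client.NewSession()
		if err != nil {
			log.Fatal(err)
		}
		out, err := sess.CombinedOutput(c)
		sess.Close()
		if err != nil {
			log.Fatalf("%q failed: %v\n%s", c, err, out)
		}
		fmt.Printf("ok: %s\n", c)
	}
}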
I1123 08:57:43.137011 62386 start.go:496] detecting cgroup driver to use...
I1123 08:57:43.137115 62386 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1123 08:57:43.166541 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:57:43.198142 62386 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1123 08:57:43.220890 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:57:43.239791 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:57:43.260304 62386 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 08:57:43.296702 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:57:43.316993 62386 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:57:43.348493 62386 ssh_runner.go:195] Run: which cri-dockerd
I1123 08:57:43.353715 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1123 08:57:43.367872 62386 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1123 08:57:43.391806 62386 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1123 08:57:43.570922 62386 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1123 08:57:43.771497 62386 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1123 08:57:43.771641 62386 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1123 08:57:43.796840 62386 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1123 08:57:43.815699 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:43.997592 62386 ssh_runner.go:195] Run: sudo systemctl restart docker
I1123 08:57:44.541819 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 08:57:44.559735 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1123 08:57:44.577562 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:44.595133 62386 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1123 08:57:44.759253 62386 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1123 08:57:44.927897 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:45.126443 62386 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1123 08:57:45.161272 62386 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1123 08:57:45.179561 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:45.365439 62386 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1123 08:57:45.512591 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:45.537318 62386 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1123 08:57:45.537393 62386 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1123 08:57:45.546577 62386 start.go:564] Will wait 60s for crictl version
I1123 08:57:45.546657 62386 ssh_runner.go:195] Run: which crictl
I1123 08:57:45.553243 62386 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1123 08:57:45.597074 62386 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1123 08:57:45.597163 62386 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:57:45.640023 62386 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:57:45.668409 62386 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1123 08:57:45.671742 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:45.672152 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:45.672174 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:45.672386 62386 ssh_runner.go:195] Run: grep 192.168.39.1 host.minikube.internal$ /etc/hosts
I1123 08:57:45.677208 62386 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.39.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:57:45.697750 62386 out.go:179] - kubeadm.pod-network-cidr=10.42.0.0/16
I1123 08:57:42.471379 62480 out.go:252] * Restarting existing kvm2 VM for "default-k8s-diff-port-925051" ...
I1123 08:57:42.471439 62480 main.go:143] libmachine: starting domain...
I1123 08:57:42.471451 62480 main.go:143] libmachine: ensuring networks are active...
I1123 08:57:42.472371 62480 main.go:143] libmachine: Ensuring network default is active
I1123 08:57:42.473208 62480 main.go:143] libmachine: Ensuring network mk-default-k8s-diff-port-925051 is active
I1123 08:57:42.474158 62480 main.go:143] libmachine: getting domain XML...
I1123 08:57:42.476521 62480 main.go:143] libmachine: starting domain XML:
<domain type='kvm'>
<name>default-k8s-diff-port-925051</name>
<uuid>faa8704c-25e4-4eae-b827-cb508c4f9f54</uuid>
<memory unit='KiB'>3145728</memory>
<currentMemory unit='KiB'>3145728</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='2'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' io='threads'/>
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/default-k8s-diff-port-925051.rawdisk'/>
<target dev='hda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</controller>
<interface type='network'>
<mac address='52:54:00:19:c7:db'/>
<source network='mk-default-k8s-diff-port-925051'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:fd:c0:c5'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<audio id='1' type='none'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</rng>
</devices>
</domain>
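The interface MAC addresses in the domain XML above are what the subsequent "found host DHCP lease matching" lines key on. A small Go sketch, assuming the XML is saved to a local file, that extracts just those fields with the standard library (the struct models only what is needed, not libvirt's full schema):

package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"os"
)

type domain struct {
	Name       string `xml:"name"`
	Interfaces []struct {
		MAC struct {
			Address string `xml:"address,attr"`
		} `xml:"mac"`
		Source struct {
			Network string `xml:"network,attr"`
		} `xml:"source"`
	} `xml:"devices>interface"`
}

func main() {
	raw, err := os.ReadFile("domain.xml") // hypothetical: the XML dumped above saved to a file
	if err != nil {
		log.Fatal(err)
	}
	var d domain
	if err := xml.Unmarshal(raw, &d); err != nil {
		log.Fatal(err)
	}
	for _, iface := range d.Interfaces {
		// These MACs are matched against the libvirt network's DHCP leases to find the VM's IP.
		fmt.Printf("%s: %s on network %q\n", d.Name, iface.MAC.Address, iface.Source.Network)
	}
}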
I1123 08:57:44.035948 62480 main.go:143] libmachine: waiting for domain to start...
I1123 08:57:44.037946 62480 main.go:143] libmachine: domain is now running
I1123 08:57:44.037965 62480 main.go:143] libmachine: waiting for IP...
I1123 08:57:44.039014 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:57:44.039860 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has current primary IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:57:44.039874 62480 main.go:143] libmachine: found domain IP: 192.168.83.137
I1123 08:57:44.039880 62480 main.go:143] libmachine: reserving static IP address...
I1123 08:57:44.040364 62480 main.go:143] libmachine: found host DHCP lease matching {name: "default-k8s-diff-port-925051", mac: "52:54:00:19:c7:db", ip: "192.168.83.137"} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:55:37 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:57:44.040404 62480 main.go:143] libmachine: skip adding static IP to network mk-default-k8s-diff-port-925051 - found existing host DHCP lease matching {name: "default-k8s-diff-port-925051", mac: "52:54:00:19:c7:db", ip: "192.168.83.137"}
I1123 08:57:44.040416 62480 main.go:143] libmachine: reserved static IP address 192.168.83.137 for domain default-k8s-diff-port-925051
I1123 08:57:44.040421 62480 main.go:143] libmachine: waiting for SSH...
I1123 08:57:44.040425 62480 main.go:143] libmachine: Getting to WaitForSSH function...
I1123 08:57:44.043072 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:57:44.043526 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:55:37 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:57:44.043551 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:57:44.043747 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:44.044097 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:57:44.044119 62480 main.go:143] libmachine: About to run SSH command:
exit 0
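The "waiting for SSH" phase above, and the later "no route to host" / "connection refused" retries for this VM, amount to dialing port 22 until sshd answers. A sketch of that loop with the standard library, with placeholder address and timeouts (the real code additionally runs `exit 0` over an SSH session once connected):

package main

import (
	"fmt"
	"log"
	"net"
	"time"
)

func waitForSSH(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("ssh on %s not reachable after %s: %w", addr, timeout, err)
		}
		// "no route to host" or "connection refused" while the guest boots is expected; keep retrying.
		time.Sleep(2 * time.Second)
	}
}

func main() {
	if err := waitForSSH("192.168.83.137:22", 2*time.Minute); err != nil {
		log.Fatal(err)
	}
	fmt.Println("sshd is up")
}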
W1123 08:57:43.874417 62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
I1123 08:57:44.875063 62034 node_ready.go:49] node "embed-certs-059363" is "Ready"
I1123 08:57:44.875101 62034 node_ready.go:38] duration metric: took 8.005319911s for node "embed-certs-059363" to be "Ready" ...
I1123 08:57:44.875126 62034 api_server.go:52] waiting for apiserver process to appear ...
I1123 08:57:44.875194 62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:57:44.908964 62034 api_server.go:72] duration metric: took 8.381553502s to wait for apiserver process to appear ...
I1123 08:57:44.908993 62034 api_server.go:88] waiting for apiserver healthz status ...
I1123 08:57:44.909013 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:44.924580 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 200:
ok
I1123 08:57:44.927212 62034 api_server.go:141] control plane version: v1.34.1
I1123 08:57:44.927254 62034 api_server.go:131] duration metric: took 18.252447ms to wait for apiserver health ...
I1123 08:57:44.927266 62034 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 08:57:44.936682 62034 system_pods.go:59] 8 kube-system pods found
I1123 08:57:44.936719 62034 system_pods.go:61] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running
I1123 08:57:44.936727 62034 system_pods.go:61] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running
I1123 08:57:44.936746 62034 system_pods.go:61] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1123 08:57:44.936754 62034 system_pods.go:61] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running
I1123 08:57:44.936762 62034 system_pods.go:61] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
I1123 08:57:44.936772 62034 system_pods.go:61] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1123 08:57:44.936780 62034 system_pods.go:61] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1123 08:57:44.936786 62034 system_pods.go:61] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
I1123 08:57:44.936794 62034 system_pods.go:74] duration metric: took 9.520766ms to wait for pod list to return data ...
I1123 08:57:44.936804 62034 default_sa.go:34] waiting for default service account to be created ...
I1123 08:57:44.948188 62034 default_sa.go:45] found service account: "default"
I1123 08:57:44.948225 62034 default_sa.go:55] duration metric: took 11.401143ms for default service account to be created ...
I1123 08:57:44.948255 62034 system_pods.go:116] waiting for k8s-apps to be running ...
I1123 08:57:44.951719 62034 system_pods.go:86] 8 kube-system pods found
I1123 08:57:44.951754 62034 system_pods.go:89] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running
I1123 08:57:44.951774 62034 system_pods.go:89] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running
I1123 08:57:44.951787 62034 system_pods.go:89] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1123 08:57:44.951803 62034 system_pods.go:89] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running
I1123 08:57:44.951812 62034 system_pods.go:89] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
I1123 08:57:44.951821 62034 system_pods.go:89] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1123 08:57:44.951837 62034 system_pods.go:89] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1123 08:57:44.951850 62034 system_pods.go:89] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
I1123 08:57:44.951862 62034 system_pods.go:126] duration metric: took 3.598572ms to wait for k8s-apps to be running ...
I1123 08:57:44.951872 62034 system_svc.go:44] waiting for kubelet service to be running ....
I1123 08:57:44.951940 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 08:57:44.981007 62034 system_svc.go:56] duration metric: took 29.122206ms WaitForService to wait for kubelet
I1123 08:57:44.981059 62034 kubeadm.go:587] duration metric: took 8.453653674s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:57:44.981082 62034 node_conditions.go:102] verifying NodePressure condition ...
I1123 08:57:44.985604 62034 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1123 08:57:44.985627 62034 node_conditions.go:123] node cpu capacity is 2
I1123 08:57:44.985639 62034 node_conditions.go:105] duration metric: took 4.549928ms to run NodePressure ...
I1123 08:57:44.985653 62034 start.go:242] waiting for startup goroutines ...
I1123 08:57:44.985663 62034 start.go:247] waiting for cluster config update ...
I1123 08:57:44.985678 62034 start.go:256] writing updated cluster config ...
I1123 08:57:44.986007 62034 ssh_runner.go:195] Run: rm -f paused
I1123 08:57:44.992429 62034 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:57:44.997825 62034 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-665gz" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:45.005294 62034 pod_ready.go:94] pod "coredns-66bc5c9577-665gz" is "Ready"
I1123 08:57:45.005321 62034 pod_ready.go:86] duration metric: took 7.470836ms for pod "coredns-66bc5c9577-665gz" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:45.008602 62034 pod_ready.go:83] waiting for pod "etcd-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:45.017355 62034 pod_ready.go:94] pod "etcd-embed-certs-059363" is "Ready"
I1123 08:57:45.017385 62034 pod_ready.go:86] duration metric: took 8.758566ms for pod "etcd-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:45.020737 62034 pod_ready.go:83] waiting for pod "kube-apiserver-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
W1123 08:57:47.036716 62034 pod_ready.go:104] pod "kube-apiserver-embed-certs-059363" is not "Ready", error: <nil>
I1123 08:57:45.699160 62386 kubeadm.go:884] updating cluster {Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1
.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.87 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144
MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1123 08:57:45.699335 62386 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1123 08:57:45.699438 62386 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:57:45.722240 62386 docker.go:691] Got preloaded images:
I1123 08:57:45.722266 62386 docker.go:697] registry.k8s.io/kube-apiserver:v1.34.1 wasn't preloaded
I1123 08:57:45.722318 62386 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
I1123 08:57:45.737539 62386 ssh_runner.go:195] Run: which lz4
I1123 08:57:45.742521 62386 ssh_runner.go:195] Run: stat -c "%s %y" /preloaded.tar.lz4
I1123 08:57:45.748122 62386 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%s %y" /preloaded.tar.lz4: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/preloaded.tar.lz4': No such file or directory
I1123 08:57:45.748156 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 --> /preloaded.tar.lz4 (353378914 bytes)
I1123 08:57:47.397908 62386 docker.go:655] duration metric: took 1.655425847s to copy over tarball
I1123 08:57:47.398050 62386 ssh_runner.go:195] Run: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4
I1123 08:57:49.041182 62386 ssh_runner.go:235] Completed: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4: (1.643095229s)
I1123 08:57:49.041212 62386 ssh_runner.go:146] rm: /preloaded.tar.lz4
I1123 08:57:49.084378 62386 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
I1123 08:57:49.103760 62386 ssh_runner.go:362] scp memory --> /var/lib/docker/image/overlay2/repositories.json (2632 bytes)
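The preload step above first lists the images the runtime already has, finds that kube-apiserver "wasn't preloaded", and only then copies and extracts the preload tarball. A local Go sketch of that decision, assuming `docker` is on PATH here rather than inside the VM:

package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
	if err != nil {
		log.Fatal(err)
	}
	have := map[string]bool{}
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		have[line] = true
	}
	// A few of the images the log expects for Kubernetes v1.34.1.
	required := []string{
		"registry.k8s.io/kube-apiserver:v1.34.1",
		"registry.k8s.io/etcd:3.6.4-0",
		"registry.k8s.io/pause:3.10.1",
	}
	for _, img := range required {
		if !have[img] {
			fmt.Printf("%s wasn't preloaded; would copy and extract the preload tarball\n", img)
			return
		}
	}
	fmt.Println("images are preloaded, skipping loading")
}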
W1123 08:57:49.601859 62034 pod_ready.go:104] pod "kube-apiserver-embed-certs-059363" is not "Ready", error: <nil>
I1123 08:57:50.104106 62034 pod_ready.go:94] pod "kube-apiserver-embed-certs-059363" is "Ready"
I1123 08:57:50.104158 62034 pod_ready.go:86] duration metric: took 5.08337291s for pod "kube-apiserver-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.107546 62034 pod_ready.go:83] waiting for pod "kube-controller-manager-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.115455 62034 pod_ready.go:94] pod "kube-controller-manager-embed-certs-059363" is "Ready"
I1123 08:57:50.115500 62034 pod_ready.go:86] duration metric: took 7.928459ms for pod "kube-controller-manager-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.119972 62034 pod_ready.go:83] waiting for pod "kube-proxy-sjvcr" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.127595 62034 pod_ready.go:94] pod "kube-proxy-sjvcr" is "Ready"
I1123 08:57:50.127628 62034 pod_ready.go:86] duration metric: took 7.626091ms for pod "kube-proxy-sjvcr" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.773984 62034 pod_ready.go:83] waiting for pod "kube-scheduler-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.786424 62034 pod_ready.go:94] pod "kube-scheduler-embed-certs-059363" is "Ready"
I1123 08:57:50.786450 62034 pod_ready.go:86] duration metric: took 12.434457ms for pod "kube-scheduler-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.786464 62034 pod_ready.go:40] duration metric: took 5.79400818s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:57:50.838926 62034 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1123 08:57:50.918780 62034 out.go:179] * Done! kubectl is now configured to use "embed-certs-059363" cluster and "default" namespace by default
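The pod_ready waits above poll each kube-system pod until its Ready condition is True (or it is gone). A client-go sketch of one such wait, with a placeholder kubeconfig path and the etcd pod name from this run as an example:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/.kube/config") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
	defer cancel()
	for {
		pod, err := cs.CoreV1().Pods("kube-system").Get(ctx, "etcd-embed-certs-059363", metav1.GetOptions{})
		if err == nil {
			for _, c := range pod.Status.Conditions {
				if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
					fmt.Println("pod is Ready")
					return
				}
			}
		}
		select {
		case <-ctx.Done():
			log.Fatal("timed out waiting for pod to be Ready")
		case <-time.After(2 * time.Second):
		}
	}
}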
I1123 08:57:47.146461 62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: no route to host
I1123 08:57:49.133800 62386 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1123 08:57:49.157740 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:49.330628 62386 ssh_runner.go:195] Run: sudo systemctl restart docker
I1123 08:57:52.066864 62386 ssh_runner.go:235] Completed: sudo systemctl restart docker: (2.736192658s)
I1123 08:57:52.066973 62386 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:57:52.092926 62386 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1123 08:57:52.092950 62386 cache_images.go:86] Images are preloaded, skipping loading
I1123 08:57:52.092962 62386 kubeadm.go:935] updating node { 192.168.39.87 8443 v1.34.1 docker true true} ...
I1123 08:57:52.093116 62386 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=newest-cni-078196 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.39.87
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1123 08:57:52.093201 62386 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1123 08:57:52.154769 62386 cni.go:84] Creating CNI manager for ""
I1123 08:57:52.154816 62386 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:57:52.154857 62386 kubeadm.go:85] Using pod CIDR: 10.42.0.0/16
I1123 08:57:52.154889 62386 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.42.0.0/16 AdvertiseAddress:192.168.39.87 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:newest-cni-078196 NodeName:newest-cni-078196 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.39.87"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.39.87 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:
/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1123 08:57:52.155043 62386 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.39.87
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "newest-cni-078196"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.39.87"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.39.87"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.42.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.42.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
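The kubeadm config dumped above is a single multi-document YAML stream (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration). A sketch of splitting such a stream with a yaml.v3 decoder loop, keeping each document as a generic map rather than kubeadm's typed structs, and assuming the config has been saved to a local file:

package main

import (
	"errors"
	"fmt"
	"io"
	"log"
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	f, err := os.Open("kubeadm.yaml") // hypothetical copy of the config shown above
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	dec := yaml.NewDecoder(f)
	for {
		var doc map[string]interface{}
		if err := dec.Decode(&doc); err != nil {
			if errors.Is(err, io.EOF) {
				break // all documents consumed
			}
			log.Fatal(err)
		}
		fmt.Printf("%v/%v\n", doc["apiVersion"], doc["kind"])
	}
}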
I1123 08:57:52.155124 62386 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1123 08:57:52.170649 62386 binaries.go:51] Found k8s binaries, skipping transfer
I1123 08:57:52.170739 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1123 08:57:52.186437 62386 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I1123 08:57:52.209956 62386 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1123 08:57:52.238732 62386 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2219 bytes)
I1123 08:57:52.263556 62386 ssh_runner.go:195] Run: grep 192.168.39.87 control-plane.minikube.internal$ /etc/hosts
I1123 08:57:52.269016 62386 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.39.87 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:57:52.291438 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:52.468471 62386 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:57:52.523082 62386 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196 for IP: 192.168.39.87
I1123 08:57:52.523106 62386 certs.go:195] generating shared ca certs ...
I1123 08:57:52.523125 62386 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.523320 62386 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
I1123 08:57:52.523383 62386 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
I1123 08:57:52.523392 62386 certs.go:257] generating profile certs ...
I1123 08:57:52.523458 62386 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key
I1123 08:57:52.523471 62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt with IP's: []
I1123 08:57:52.657113 62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt ...
I1123 08:57:52.657156 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt: {Name:mkd4a2297a388c5353f24d63692a9eca2de3895a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.657425 62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key ...
I1123 08:57:52.657447 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key: {Name:mk97d3b4437d9c086044675cf55d01816d40a112 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.657646 62386 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4
I1123 08:57:52.657673 62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.39.87]
I1123 08:57:52.753683 62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 ...
I1123 08:57:52.753714 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4: {Name:mkbf555d613a4fba5c26a5d85e984e69fa19d66f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.753910 62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4 ...
I1123 08:57:52.753929 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4: {Name:mk86a1d3d78eb2290d7da0f96ec23ec9d83a7382 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.754031 62386 certs.go:382] copying /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 -> /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt
I1123 08:57:52.754133 62386 certs.go:386] copying /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4 -> /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key
I1123 08:57:52.754190 62386 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key
I1123 08:57:52.754206 62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt with IP's: []
I1123 08:57:52.860620 62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt ...
I1123 08:57:52.860647 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt: {Name:mk8319204c666212061b0efe79d3f0da238ee7e7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.860851 62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key ...
I1123 08:57:52.860877 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key: {Name:mk66bf3abe86bc12c3af12e371d390dfcbb94d6a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.861117 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
W1123 08:57:52.861164 62386 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
I1123 08:57:52.861180 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
I1123 08:57:52.861225 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
I1123 08:57:52.861277 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
I1123 08:57:52.861316 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
I1123 08:57:52.861376 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:57:52.861976 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1123 08:57:52.899377 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1123 08:57:52.931761 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1123 08:57:52.966281 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1123 08:57:53.007390 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1123 08:57:53.044942 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1123 08:57:53.087195 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1123 08:57:53.132412 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1123 08:57:53.183547 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1123 08:57:53.239854 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem --> /usr/share/ca-certificates/22148.pem (1338 bytes)
I1123 08:57:53.286333 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /usr/share/ca-certificates/221482.pem (1708 bytes)
I1123 08:57:53.334114 62386 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1123 08:57:53.368550 62386 ssh_runner.go:195] Run: openssl version
I1123 08:57:53.379200 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/221482.pem && ln -fs /usr/share/ca-certificates/221482.pem /etc/ssl/certs/221482.pem"
I1123 08:57:53.402310 62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/221482.pem
I1123 08:57:53.409135 62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:02 /usr/share/ca-certificates/221482.pem
I1123 08:57:53.409206 62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/221482.pem
I1123 08:57:53.420776 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/221482.pem /etc/ssl/certs/3ec20f2e.0"
I1123 08:57:53.439668 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1123 08:57:53.455152 62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:53.463920 62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 07:56 /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:53.463999 62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:53.476317 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1123 08:57:53.500779 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/22148.pem && ln -fs /usr/share/ca-certificates/22148.pem /etc/ssl/certs/22148.pem"
I1123 08:57:53.518199 62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22148.pem
I1123 08:57:53.524305 62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:02 /usr/share/ca-certificates/22148.pem
I1123 08:57:53.524381 62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22148.pem
I1123 08:57:53.535728 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/22148.pem /etc/ssl/certs/51391683.0"
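The cert install steps above copy each PEM under /usr/share/ca-certificates, compute its OpenSSL subject hash, and symlink /etc/ssl/certs/<hash>.0 to it (e.g. b5213941.0 for minikubeCA.pem). A Go sketch of that per-cert step, with a placeholder path and requiring root for the symlink:

package main

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

func main() {
	pem := "/usr/share/ca-certificates/minikubeCA.pem" // placeholder cert path
	// `openssl x509 -hash -noout -in <file>` prints the subject hash used for the link name.
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pem).Output()
	if err != nil {
		log.Fatal(err)
	}
	hash := strings.TrimSpace(string(out))
	link := filepath.Join("/etc/ssl/certs", hash+".0")
	_ = os.Remove(link) // replace any stale link, mirroring `ln -fs`
	if err := os.Symlink(pem, link); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("linked %s -> %s\n", link, pem)
}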
I1123 08:57:53.552096 62386 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1123 08:57:53.560216 62386 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1123 08:57:53.560306 62386 kubeadm.go:401] StartCluster: {Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34
.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.87 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 Mou
ntOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:57:53.560470 62386 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1123 08:57:53.580412 62386 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1123 08:57:53.596570 62386 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1123 08:57:53.611293 62386 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1123 08:57:53.630652 62386 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1123 08:57:53.630673 62386 kubeadm.go:158] found existing configuration files:
I1123 08:57:53.630721 62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1123 08:57:53.648350 62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1123 08:57:53.648419 62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1123 08:57:53.668086 62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1123 08:57:53.682346 62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1123 08:57:53.682427 62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1123 08:57:53.696036 62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1123 08:57:53.708650 62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1123 08:57:53.708729 62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1123 08:57:53.721869 62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1123 08:57:53.733930 62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1123 08:57:53.734006 62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
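The stale-config cleanup above greps each kubeconfig under /etc/kubernetes for the expected control-plane endpoint and removes the file when the endpoint is absent (or, as here, the file does not exist yet), so kubeadm regenerates it. A sketch of that check in Go, run as root inside the guest:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	endpoint := "https://control-plane.minikube.internal:8443"
	for _, conf := range []string{
		"/etc/kubernetes/admin.conf",
		"/etc/kubernetes/kubelet.conf",
		"/etc/kubernetes/controller-manager.conf",
		"/etc/kubernetes/scheduler.conf",
	} {
		data, err := os.ReadFile(conf)
		if err == nil && strings.Contains(string(data), endpoint) {
			fmt.Printf("keeping %s\n", conf)
			continue
		}
		// Missing file or wrong endpoint: remove it (ignoring "not found"), as the log does.
		_ = os.Remove(conf)
		fmt.Printf("removed %s\n", conf)
	}
}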
I1123 08:57:53.747563 62386 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem"
I1123 08:57:53.803699 62386 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1123 08:57:53.803788 62386 kubeadm.go:319] [preflight] Running pre-flight checks
I1123 08:57:53.933708 62386 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1123 08:57:53.933907 62386 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1123 08:57:53.934039 62386 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1123 08:57:53.957595 62386 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1123 08:57:53.960282 62386 out.go:252] - Generating certificates and keys ...
I1123 08:57:53.960381 62386 kubeadm.go:319] [certs] Using existing ca certificate authority
I1123 08:57:53.960461 62386 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1123 08:57:53.226464 62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: no route to host
I1123 08:57:54.308839 62386 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1123 08:57:54.462473 62386 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1123 08:57:54.656673 62386 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1123 08:57:55.051656 62386 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1123 08:57:55.893313 62386 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1123 08:57:55.893649 62386 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost newest-cni-078196] and IPs [192.168.39.87 127.0.0.1 ::1]
I1123 08:57:56.010218 62386 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1123 08:57:56.010458 62386 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost newest-cni-078196] and IPs [192.168.39.87 127.0.0.1 ::1]
I1123 08:57:56.117087 62386 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1123 08:57:56.436611 62386 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1123 08:57:56.745597 62386 kubeadm.go:319] [certs] Generating "sa" key and public key
I1123 08:57:56.745835 62386 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1123 08:57:56.988789 62386 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1123 08:57:57.476516 62386 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1123 08:57:57.662890 62386 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1123 08:57:58.001771 62386 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1123 08:57:58.199479 62386 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1123 08:57:58.201506 62386 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1123 08:57:58.204309 62386 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1123 08:57:58.206280 62386 out.go:252] - Booting up control plane ...
I1123 08:57:58.206413 62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1123 08:57:58.206524 62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1123 08:57:58.206622 62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1123 08:57:58.225366 62386 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1123 08:57:58.225656 62386 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1123 08:57:58.233945 62386 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1123 08:57:58.234118 62386 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1123 08:57:58.234179 62386 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1123 08:57:58.435406 62386 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1123 08:57:58.435734 62386 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
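The kubelet-check above polls the kubelet's local healthz endpoint on 127.0.0.1:10248 for up to 4m0s. A minimal Go sketch of that poll loop:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 2 * time.Second}
	deadline := time.Now().Add(4 * time.Minute)
	for {
		resp, err := client.Get("http://127.0.0.1:10248/healthz")
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Println("kubelet is healthy")
				return
			}
		}
		if time.Now().After(deadline) {
			log.Fatal("kubelet did not become healthy in time")
		}
		time.Sleep(time.Second)
	}
}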
I1123 08:57:57.259625 62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: connection refused
I1123 08:58:00.375540 62480 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:58:00.379895 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.380474 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.380511 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.380795 62480 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/config.json ...
I1123 08:58:00.381087 62480 machine.go:94] provisionDockerMachine start ...
I1123 08:58:00.384347 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.384859 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.384898 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.385108 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:00.385436 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:00.385456 62480 main.go:143] libmachine: About to run SSH command:
hostname
I1123 08:58:00.505124 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1123 08:58:00.505170 62480 buildroot.go:166] provisioning hostname "default-k8s-diff-port-925051"
I1123 08:58:00.509221 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.509702 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.509735 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.509925 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:00.510144 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:00.510161 62480 main.go:143] libmachine: About to run SSH command:
sudo hostname default-k8s-diff-port-925051 && echo "default-k8s-diff-port-925051" | sudo tee /etc/hostname
I1123 08:58:00.644600 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: default-k8s-diff-port-925051
I1123 08:58:00.648066 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.648604 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.648630 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.648845 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:00.649045 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:00.649060 62480 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sdefault-k8s-diff-port-925051' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 default-k8s-diff-port-925051/g' /etc/hosts;
else
echo '127.0.1.1 default-k8s-diff-port-925051' | sudo tee -a /etc/hosts;
fi
fi
I1123 08:58:00.768996 62480 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:58:00.769030 62480 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
I1123 08:58:00.769067 62480 buildroot.go:174] setting up certificates
I1123 08:58:00.769088 62480 provision.go:84] configureAuth start
I1123 08:58:00.772355 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.772869 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.772909 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.775615 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.776035 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.776086 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.776228 62480 provision.go:143] copyHostCerts
I1123 08:58:00.776306 62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
I1123 08:58:00.776319 62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
I1123 08:58:00.776391 62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
I1123 08:58:00.776518 62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
I1123 08:58:00.776529 62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
I1123 08:58:00.776558 62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
I1123 08:58:00.776642 62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
I1123 08:58:00.776653 62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
I1123 08:58:00.776678 62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
I1123 08:58:00.776751 62480 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.default-k8s-diff-port-925051 san=[127.0.0.1 192.168.83.137 default-k8s-diff-port-925051 localhost minikube]
I1123 08:58:00.949651 62480 provision.go:177] copyRemoteCerts
I1123 08:58:00.949711 62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 08:58:00.952558 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.952960 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.952982 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.953136 62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
I1123 08:58:01.044089 62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1123 08:58:01.077898 62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1123 08:58:01.115919 62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1249 bytes)
I1123 08:58:01.157254 62480 provision.go:87] duration metric: took 388.131412ms to configureAuth
I1123 08:58:01.157285 62480 buildroot.go:189] setting minikube options for container-runtime
I1123 08:58:01.157510 62480 config.go:182] Loaded profile config "default-k8s-diff-port-925051": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:58:01.160663 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.161248 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:01.161295 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.161496 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:01.161777 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:01.161792 62480 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1123 08:58:01.278322 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1123 08:58:01.278347 62480 buildroot.go:70] root file system type: tmpfs
I1123 08:58:01.278524 62480 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1123 08:58:01.281592 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.282050 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:01.282098 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.282395 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:01.282601 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:01.282650 62480 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1123 08:58:01.426254 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1123 08:58:01.429123 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.429531 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:01.429561 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.429727 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:01.429945 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:01.429968 62480 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1123 08:57:59.438296 62386 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.003129845s
I1123 08:57:59.442059 62386 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1123 08:57:59.442209 62386 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.39.87:8443/livez
I1123 08:57:59.442348 62386 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1123 08:57:59.442479 62386 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1123 08:58:01.938904 62386 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 2.497307336s
I1123 08:58:03.405770 62386 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 3.965160338s
I1123 08:58:05.442827 62386 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 6.002687393s
I1123 08:58:05.466318 62386 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1123 08:58:05.495033 62386 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1123 08:58:05.522725 62386 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1123 08:58:05.523012 62386 kubeadm.go:319] [mark-control-plane] Marking the node newest-cni-078196 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1123 08:58:05.543260 62386 kubeadm.go:319] [bootstrap-token] Using token: dgrodg.6ciokz1biodl2yci
I1123 08:58:02.622394 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1123 08:58:02.622428 62480 machine.go:97] duration metric: took 2.24132298s to provisionDockerMachine
I1123 08:58:02.622443 62480 start.go:293] postStartSetup for "default-k8s-diff-port-925051" (driver="kvm2")
I1123 08:58:02.622457 62480 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 08:58:02.622522 62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 08:58:02.625753 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.626334 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:02.626374 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.626567 62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
I1123 08:58:02.732392 62480 ssh_runner.go:195] Run: cat /etc/os-release
I1123 08:58:02.737975 62480 info.go:137] Remote host: Buildroot 2025.02
I1123 08:58:02.738010 62480 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
I1123 08:58:02.738111 62480 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
I1123 08:58:02.738225 62480 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
I1123 08:58:02.738341 62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 08:58:02.755815 62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:58:02.790325 62480 start.go:296] duration metric: took 167.864143ms for postStartSetup
I1123 08:58:02.790381 62480 fix.go:56] duration metric: took 20.323185295s for fixHost
I1123 08:58:02.793471 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.793912 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:02.793950 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.794223 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:02.794447 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:02.794458 62480 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1123 08:58:02.907310 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888282.872914256
I1123 08:58:02.907338 62480 fix.go:216] guest clock: 1763888282.872914256
I1123 08:58:02.907348 62480 fix.go:229] Guest: 2025-11-23 08:58:02.872914256 +0000 UTC Remote: 2025-11-23 08:58:02.790385341 +0000 UTC m=+45.999028572 (delta=82.528915ms)
I1123 08:58:02.907369 62480 fix.go:200] guest clock delta is within tolerance: 82.528915ms
I1123 08:58:02.907375 62480 start.go:83] releasing machines lock for "default-k8s-diff-port-925051", held for 20.440202624s
I1123 08:58:02.910604 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.911104 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:02.911130 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.911758 62480 ssh_runner.go:195] Run: cat /version.json
I1123 08:58:02.911816 62480 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 08:58:02.915121 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.915430 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.915677 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:02.915710 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.915907 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:02.915942 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.915932 62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
I1123 08:58:02.916129 62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
I1123 08:58:03.020815 62480 ssh_runner.go:195] Run: systemctl --version
I1123 08:58:03.028066 62480 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 08:58:03.036089 62480 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 08:58:03.036168 62480 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 08:58:03.059461 62480 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1123 08:58:03.059497 62480 start.go:496] detecting cgroup driver to use...
I1123 08:58:03.059639 62480 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:58:03.085945 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1123 08:58:03.100188 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 08:58:03.114121 62480 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1123 08:58:03.114197 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1123 08:58:03.128502 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:58:03.141941 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 08:58:03.155742 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:58:03.170251 62480 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 08:58:03.185473 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 08:58:03.199212 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 08:58:03.212441 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1123 08:58:03.225457 62480 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 08:58:03.237735 62480 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1123 08:58:03.237807 62480 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1123 08:58:03.251616 62480 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 08:58:03.264293 62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:58:03.431052 62480 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1123 08:58:03.484769 62480 start.go:496] detecting cgroup driver to use...
I1123 08:58:03.484887 62480 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1123 08:58:03.515067 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:58:03.538674 62480 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1123 08:58:03.566269 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:58:03.585483 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:58:03.603778 62480 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 08:58:03.640497 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:58:03.659085 62480 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:58:03.687162 62480 ssh_runner.go:195] Run: which cri-dockerd
I1123 08:58:03.694175 62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1123 08:58:03.712519 62480 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1123 08:58:03.741521 62480 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1123 08:58:03.916394 62480 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1123 08:58:04.069031 62480 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1123 08:58:04.069190 62480 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1123 08:58:04.093301 62480 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1123 08:58:04.109417 62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:58:04.272454 62480 ssh_runner.go:195] Run: sudo systemctl restart docker
I1123 08:58:04.931701 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 08:58:04.948944 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1123 08:58:04.971544 62480 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I1123 08:58:05.005474 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:58:05.031097 62480 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1123 08:58:05.200507 62480 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1123 08:58:05.394816 62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:58:05.619873 62480 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1123 08:58:05.666855 62480 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1123 08:58:05.685142 62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:58:05.848671 62480 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1123 08:58:05.996045 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:58:06.018056 62480 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1123 08:58:06.018168 62480 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1123 08:58:06.026546 62480 start.go:564] Will wait 60s for crictl version
I1123 08:58:06.026630 62480 ssh_runner.go:195] Run: which crictl
I1123 08:58:06.032819 62480 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1123 08:58:06.084168 62480 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1123 08:58:06.084266 62480 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:58:06.126882 62480 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:58:06.163943 62480 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1123 08:58:06.168664 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:06.169284 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:06.169324 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:06.169553 62480 ssh_runner.go:195] Run: grep 192.168.83.1 host.minikube.internal$ /etc/hosts
I1123 08:58:06.176801 62480 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.83.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:58:06.201834 62480 kubeadm.go:884] updating cluster {Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1123 08:58:06.201979 62480 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1123 08:58:06.202051 62480 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:58:06.228393 62480 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1123 08:58:06.228418 62480 docker.go:621] Images already preloaded, skipping extraction
I1123 08:58:06.228478 62480 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:58:06.253832 62480 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1123 08:58:06.253872 62480 cache_images.go:86] Images are preloaded, skipping loading
I1123 08:58:06.253886 62480 kubeadm.go:935] updating node { 192.168.83.137 8444 v1.34.1 docker true true} ...
I1123 08:58:06.254046 62480 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=default-k8s-diff-port-925051 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.83.137
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1123 08:58:06.254117 62480 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1123 08:58:06.333361 62480 cni.go:84] Creating CNI manager for ""
I1123 08:58:06.333408 62480 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:58:06.333432 62480 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1123 08:58:06.333457 62480 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.83.137 APIServerPort:8444 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:default-k8s-diff-port-925051 NodeName:default-k8s-diff-port-925051 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.83.137"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.83.137 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1123 08:58:06.333702 62480 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.83.137
bindPort: 8444
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "default-k8s-diff-port-925051"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.83.137"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.83.137"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8444
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1123 08:58:06.333784 62480 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1123 08:58:06.356565 62480 binaries.go:51] Found k8s binaries, skipping transfer
I1123 08:58:06.356666 62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1123 08:58:06.376736 62480 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (329 bytes)
I1123 08:58:06.412797 62480 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1123 08:58:06.447785 62480 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2235 bytes)
I1123 08:58:06.486793 62480 ssh_runner.go:195] Run: grep 192.168.83.137 control-plane.minikube.internal$ /etc/hosts
I1123 08:58:06.494943 62480 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.83.137 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:58:06.522673 62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:58:06.760714 62480 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:58:06.816865 62480 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051 for IP: 192.168.83.137
I1123 08:58:06.817014 62480 certs.go:195] generating shared ca certs ...
I1123 08:58:06.817069 62480 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:58:06.817298 62480 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
I1123 08:58:06.817470 62480 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
I1123 08:58:06.817524 62480 certs.go:257] generating profile certs ...
I1123 08:58:06.817689 62480 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/client.key
I1123 08:58:06.817768 62480 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/apiserver.key.3e63079d
I1123 08:58:06.817847 62480 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/proxy-client.key
I1123 08:58:06.818039 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
W1123 08:58:06.818089 62480 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
I1123 08:58:06.818100 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
I1123 08:58:06.818136 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
I1123 08:58:06.818179 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
I1123 08:58:06.818209 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
I1123 08:58:06.818301 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:58:06.819187 62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1123 08:58:05.545959 62386 out.go:252] - Configuring RBAC rules ...
I1123 08:58:05.546132 62386 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1123 08:58:05.554804 62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1123 08:58:05.569723 62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1123 08:58:05.574634 62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1123 08:58:05.579213 62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1123 08:58:05.585176 62386 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1123 08:58:05.855390 62386 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1123 08:58:06.305498 62386 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1123 08:58:06.860572 62386 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1123 08:58:06.862132 62386 kubeadm.go:319]
I1123 08:58:06.862300 62386 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1123 08:58:06.862315 62386 kubeadm.go:319]
I1123 08:58:06.862459 62386 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1123 08:58:06.862488 62386 kubeadm.go:319]
I1123 08:58:06.862544 62386 kubeadm.go:319] mkdir -p $HOME/.kube
I1123 08:58:06.862628 62386 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1123 08:58:06.862700 62386 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1123 08:58:06.862710 62386 kubeadm.go:319]
I1123 08:58:06.862788 62386 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1123 08:58:06.862797 62386 kubeadm.go:319]
I1123 08:58:06.862866 62386 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1123 08:58:06.862875 62386 kubeadm.go:319]
I1123 08:58:06.862984 62386 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1123 08:58:06.863098 62386 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1123 08:58:06.863220 62386 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1123 08:58:06.863243 62386 kubeadm.go:319]
I1123 08:58:06.863353 62386 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1123 08:58:06.863463 62386 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1123 08:58:06.863473 62386 kubeadm.go:319]
I1123 08:58:06.863589 62386 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token dgrodg.6ciokz1biodl2yci \
I1123 08:58:06.863736 62386 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:4395c5eefb8e424e96dd1759797a1c8f0fafb8cddc9a1a46a496a26ff5b9685a \
I1123 08:58:06.863769 62386 kubeadm.go:319] --control-plane
I1123 08:58:06.863778 62386 kubeadm.go:319]
I1123 08:58:06.863904 62386 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1123 08:58:06.863913 62386 kubeadm.go:319]
I1123 08:58:06.864056 62386 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token dgrodg.6ciokz1biodl2yci \
I1123 08:58:06.864229 62386 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:4395c5eefb8e424e96dd1759797a1c8f0fafb8cddc9a1a46a496a26ff5b9685a
I1123 08:58:06.865336 62386 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I1123 08:58:06.865367 62386 cni.go:84] Creating CNI manager for ""
I1123 08:58:06.865396 62386 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:58:06.867294 62386 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
==> Docker <==
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.403847294Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.530278754Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.530380987Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Nov 23 08:57:20 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:20Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.541222738Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.541313635Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.544639412Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.544665809Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.895558802Z" level=info msg="ignoring event" container=5858e2fd1e0f544e020a845d1e9aa15e86c2117c0ebff9dfe1b6f4d96f844434 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Nov 23 08:57:21 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/f70de02d77443d2041cfe03c25cb36b6f758dd4e678353419ea55ac106e8b68a/resolv.conf as [nameserver 10.96.0.10 search kubernetes-dashboard.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Nov 23 08:57:32 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:32.990740143Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.076597693Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.076828182Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Nov 23 08:57:33 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:33Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.550212350Z" level=info msg="ignoring event" container=1f0a2f0aefa9b826288b8b721a751f41c880f8daa0983c581ae8b039871db1a1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Nov 23 08:58:07 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:07Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Nov 23 08:58:07 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:07Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-lp6jk_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"9a49ebff42d5eef5c3e23db2e1ab337396080dea6c13220062ba5e0e48a95cc8\""
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.760065184Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.863488316Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.863610785Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Nov 23 08:58:08 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:08Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.897944813Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.899313923Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.914470304Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.914503647Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
371de4a468901 6e38f40d628db 1 second ago Running storage-provisioner 2 a97e7e7100c3a storage-provisioner kube-system
57ebcdb97431d kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93 49 seconds ago Running kubernetes-dashboard 0 644b3c0a17fe8 kubernetes-dashboard-855c9754f9-zh9mv kubernetes-dashboard
58768e42678e9 56cc512116c8f 51 seconds ago Running busybox 1 c39a5f42630b0 busybox default
f7e183883855c 52546a367cc9e 51 seconds ago Running coredns 1 86281d14c8f1e coredns-66bc5c9577-nj6pk kube-system
1f0a2f0aefa9b 6e38f40d628db About a minute ago Exited storage-provisioner 1 a97e7e7100c3a storage-provisioner kube-system
8c0537e27a6fb fc25172553d79 About a minute ago Running kube-proxy 1 dd983c999b8f4 kube-proxy-wlb9w kube-system
8deb34aee6ea1 5f1f5298c888d About a minute ago Running etcd 1 ccce046e98c9b etcd-no-preload-019660 kube-system
1a4750ff7e8cb c80c8dbafe7dd About a minute ago Running kube-controller-manager 1 e18e6fb700516 kube-controller-manager-no-preload-019660 kube-system
6929fc4394d1d c3994bc696102 About a minute ago Running kube-apiserver 1 b493d9303993d kube-apiserver-no-preload-019660 kube-system
266be5a40ca65 7dd6aaa1717ab About a minute ago Running kube-scheduler 1 a1f3f18719102 kube-scheduler-no-preload-019660 kube-system
7e459e5ac3043 gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 2 minutes ago Exited busybox 0 c0e79a536f316 busybox default
b5d2ec6064039 52546a367cc9e 2 minutes ago Exited coredns 0 92a72987832f3 coredns-66bc5c9577-nj6pk kube-system
4aea324009fdd fc25172553d79 2 minutes ago Exited kube-proxy 0 adcf7215f30c5 kube-proxy-wlb9w kube-system
57bb06d26ab69 7dd6aaa1717ab 2 minutes ago Exited kube-scheduler 0 0e3f3ba5c2b8c kube-scheduler-no-preload-019660 kube-system
78433f5a1dee5 5f1f5298c888d 2 minutes ago Exited etcd 0 c90dfb42b9b72 etcd-no-preload-019660 kube-system
e0963762dabe6 c80c8dbafe7dd 2 minutes ago Exited kube-controller-manager 0 796e38a439eca kube-controller-manager-no-preload-019660 kube-system
51985d9c2b5e4 c3994bc696102 2 minutes ago Exited kube-apiserver 0 8ec1927039422 kube-apiserver-no-preload-019660 kube-system
==> coredns [b5d2ec606403] <==
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] Reloading
[INFO] plugin/reload: Running configuration SHA512 = 6e77f21cd6946b87ec86c565e2060aa5d23c02882cb22fd7a321b5e8cd0c8bdafe21968fcff406405707b988b753da21ecd190fe02329f1b569bfa74920cc0fa
[INFO] Reloading complete
[INFO] 127.0.0.1:42110 - 29445 "HINFO IN 9017480915883545082.4400091200596631812. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.103448715s
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [f7e183883855] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 6e77f21cd6946b87ec86c565e2060aa5d23c02882cb22fd7a321b5e8cd0c8bdafe21968fcff406405707b988b753da21ecd190fe02329f1b569bfa74920cc0fa
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:55083 - 4317 "HINFO IN 4704850718228764652.4547352497864188913. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.118220473s
==> describe nodes <==
Name: no-preload-019660
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=no-preload-019660
kubernetes.io/os=linux
minikube.k8s.io/commit=3e219827a5f064cf736992b79e59864301ece66e
minikube.k8s.io/name=no-preload-019660
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_23T08_55_22_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 23 Nov 2025 08:55:18 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: no-preload-019660
AcquireTime: <unset>
RenewTime: Sun, 23 Nov 2025 08:58:07 +0000
Conditions:
Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
----             ------  -----------------                 ------------------                ------                      -------
MemoryPressure   False   Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:55:14 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
DiskPressure     False   Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:55:14 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
PIDPressure      False   Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:55:14 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
Ready            True    Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:57:11 +0000   KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.50.40
Hostname: no-preload-019660
Capacity:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
System Info:
Machine ID: 5db77235f15f4a52ad7c561433b2bbe5
System UUID: 5db77235-f15f-4a52-ad7c-561433b2bbe5
Boot ID: 7c4938cf-e087-4d48-94a0-7660c53890e7
Kernel Version: 6.6.95
OS Image: Buildroot 2025.02
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.5.1
Kubelet Version: v1.34.1
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace             Name                                         CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------             ----                                         ------------  ----------  ---------------  -------------  ---
default               busybox                                      0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m4s
kube-system           coredns-66bc5c9577-nj6pk                     100m (5%)     0 (0%)      70Mi (2%)        170Mi (5%)     2m41s
kube-system           etcd-no-preload-019660                       100m (5%)     0 (0%)      100Mi (3%)       0 (0%)         2m47s
kube-system           kube-apiserver-no-preload-019660             250m (12%)    0 (0%)      0 (0%)           0 (0%)         2m47s
kube-system           kube-controller-manager-no-preload-019660    200m (10%)    0 (0%)      0 (0%)           0 (0%)         2m47s
kube-system           kube-proxy-wlb9w                             0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m42s
kube-system           kube-scheduler-no-preload-019660             100m (5%)     0 (0%)      0 (0%)           0 (0%)         2m47s
kube-system           metrics-server-746fcd58dc-tg8q5              100m (5%)     0 (0%)      200Mi (6%)       0 (0%)         114s
kube-system           storage-provisioner                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m39s
kubernetes-dashboard  dashboard-metrics-scraper-6ffb444bf9-4965t   0 (0%)        0 (0%)      0 (0%)           0 (0%)         62s
kubernetes-dashboard  kubernetes-dashboard-855c9754f9-zh9mv        0 (0%)        0 (0%)      0 (0%)           0 (0%)         62s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests     Limits
--------           --------     ------
cpu                850m (42%)   0 (0%)
memory             370Mi (12%)  170Mi (5%)
ephemeral-storage  0 (0%)       0 (0%)
hugepages-2Mi      0 (0%)       0 (0%)
Events:
Type     Reason                    Age                    From             Message
----     ------                    ----                   ----             -------
Normal   Starting                  2m39s                  kube-proxy
Normal   Starting                  65s                    kube-proxy
Normal   NodeHasSufficientMemory   2m56s (x8 over 2m56s)  kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure     2m56s (x8 over 2m56s)  kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID      2m56s (x7 over 2m56s)  kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
Normal   NodeAllocatableEnforced   2m56s                  kubelet          Updated Node Allocatable limit across pods
Normal   Starting                  2m47s                  kubelet          Starting kubelet.
Normal   NodeAllocatableEnforced   2m47s                  kubelet          Updated Node Allocatable limit across pods
Normal   NodeHasSufficientMemory   2m47s                  kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure     2m47s                  kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID      2m47s                  kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
Normal   NodeReady                 2m43s                  kubelet          Node no-preload-019660 status is now: NodeReady
Normal   RegisteredNode            2m42s                  node-controller  Node no-preload-019660 event: Registered Node no-preload-019660 in Controller
Normal   Starting                  74s                    kubelet          Starting kubelet.
Normal   NodeAllocatableEnforced   74s                    kubelet          Updated Node Allocatable limit across pods
Normal   NodeHasNoDiskPressure     73s (x8 over 74s)      kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID      73s (x7 over 74s)      kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
Normal   NodeHasSufficientMemory   73s (x8 over 74s)      kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
Warning  Rebooted                  68s                    kubelet          Node no-preload-019660 has been rebooted, boot id: 7c4938cf-e087-4d48-94a0-7660c53890e7
Normal   RegisteredNode            65s                    node-controller  Node no-preload-019660 event: Registered Node no-preload-019660 in Controller
Normal   Starting                  2s                     kubelet          Starting kubelet.
Normal   NodeAllocatableEnforced   2s                     kubelet          Updated Node Allocatable limit across pods
Normal   NodeHasSufficientMemory   2s                     kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure     2s                     kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID      2s                     kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
==> dmesg <==
[Nov23 08:56] Booted with the nomodeset parameter. Only the system framebuffer will be available
[ +0.000011] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.001555] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +0.004890] (rpcbind)[121]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
[ +0.922269] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
[ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
[ +0.000002] NFSD: Unable to initialize client recovery tracking! (-2)
[ +0.557715] kauditd_printk_skb: 29 callbacks suppressed
[ +0.102404] kauditd_printk_skb: 421 callbacks suppressed
[Nov23 08:57] kauditd_printk_skb: 165 callbacks suppressed
[ +4.416704] kauditd_printk_skb: 134 callbacks suppressed
[ +0.028951] kauditd_printk_skb: 144 callbacks suppressed
[ +1.212600] kauditd_printk_skb: 93 callbacks suppressed
[ +0.188677] kauditd_printk_skb: 78 callbacks suppressed
[Nov23 08:58] kauditd_printk_skb: 35 callbacks suppressed
==> etcd [78433f5a1dee] <==
{"level":"info","ts":"2025-11-23T08:55:27.960210Z","caller":"traceutil/trace.go:172","msg":"trace[1913795349] transaction","detail":"{read_only:false; response_revision:359; number_of_response:1; }","duration":"132.125474ms","start":"2025-11-23T08:55:27.828070Z","end":"2025-11-23T08:55:27.960197Z","steps":["trace[1913795349] 'process raft request' (duration: 130.470237ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-23T08:55:27.961326Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"115.093447ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/service-cidrs-controller\" limit:1 ","response":"range_response_count:1 size:214"}
{"level":"info","ts":"2025-11-23T08:55:27.961420Z","caller":"traceutil/trace.go:172","msg":"trace[1979015044] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/service-cidrs-controller; range_end:; response_count:1; response_revision:360; }","duration":"115.232691ms","start":"2025-11-23T08:55:27.846179Z","end":"2025-11-23T08:55:27.961412Z","steps":["trace[1979015044] 'agreement among raft nodes before linearized reading' (duration: 114.979531ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:55:27.964671Z","caller":"traceutil/trace.go:172","msg":"trace[1629415560] transaction","detail":"{read_only:false; response_revision:361; number_of_response:1; }","duration":"113.511815ms","start":"2025-11-23T08:55:27.851149Z","end":"2025-11-23T08:55:27.964661Z","steps":["trace[1629415560] 'process raft request' (duration: 111.933576ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:55:27.965851Z","caller":"traceutil/trace.go:172","msg":"trace[339398896] transaction","detail":"{read_only:false; response_revision:362; number_of_response:1; }","duration":"103.77975ms","start":"2025-11-23T08:55:27.862061Z","end":"2025-11-23T08:55:27.965841Z","steps":["trace[339398896] 'process raft request' (duration: 102.247209ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:55:52.232221Z","caller":"traceutil/trace.go:172","msg":"trace[991594023] transaction","detail":"{read_only:false; response_revision:463; number_of_response:1; }","duration":"138.295615ms","start":"2025-11-23T08:55:52.093898Z","end":"2025-11-23T08:55:52.232193Z","steps":["trace[991594023] 'process raft request' (duration: 138.148011ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:55:53.110050Z","caller":"traceutil/trace.go:172","msg":"trace[1408655835] transaction","detail":"{read_only:false; response_revision:464; number_of_response:1; }","duration":"111.465311ms","start":"2025-11-23T08:55:52.998570Z","end":"2025-11-23T08:55:53.110036Z","steps":["trace[1408655835] 'process raft request' (duration: 111.386468ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:56:16.343294Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
{"level":"info","ts":"2025-11-23T08:56:16.343638Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"no-preload-019660","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.50.40:2380"],"advertise-client-urls":["https://192.168.50.40:2379"]}
{"level":"error","ts":"2025-11-23T08:56:16.344971Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-11-23T08:56:23.350843Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-11-23T08:56:23.350926Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-11-23T08:56:23.350948Z","caller":"etcdserver/server.go:1281","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"113a167c41258c81","current-leader-member-id":"113a167c41258c81"}
{"level":"info","ts":"2025-11-23T08:56:23.351067Z","caller":"etcdserver/server.go:2342","msg":"server has stopped; stopping storage version's monitor"}
{"level":"info","ts":"2025-11-23T08:56:23.351076Z","caller":"etcdserver/server.go:2319","msg":"server has stopped; stopping cluster version's monitor"}
{"level":"warn","ts":"2025-11-23T08:56:23.353233Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"warn","ts":"2025-11-23T08:56:23.353335Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"error","ts":"2025-11-23T08:56:23.353344Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"warn","ts":"2025-11-23T08:56:23.353381Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.50.40:2379: use of closed network connection"}
{"level":"warn","ts":"2025-11-23T08:56:23.353419Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.50.40:2379: use of closed network connection"}
{"level":"error","ts":"2025-11-23T08:56:23.353428Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.50.40:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-11-23T08:56:23.359157Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.50.40:2380"}
{"level":"error","ts":"2025-11-23T08:56:23.359253Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.50.40:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-11-23T08:56:23.359488Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.50.40:2380"}
{"level":"info","ts":"2025-11-23T08:56:23.359540Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"no-preload-019660","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.50.40:2380"],"advertise-client-urls":["https://192.168.50.40:2379"]}
==> etcd [8deb34aee6ea] <==
{"level":"warn","ts":"2025-11-23T08:57:00.099710Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44330","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.113877Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44336","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.136374Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44356","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.145346Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44368","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.154857Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44394","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.171909Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44414","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.185801Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44422","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.191640Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44442","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.202370Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44456","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.212078Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44464","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.224299Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44490","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.239703Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44498","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.248343Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44522","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.259201Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44546","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.280884Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44576","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.303755Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44586","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.322303Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44610","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.379317Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44628","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-11-23T08:57:16.914297Z","caller":"traceutil/trace.go:172","msg":"trace[282693566] transaction","detail":"{read_only:false; response_revision:710; number_of_response:1; }","duration":"165.899912ms","start":"2025-11-23T08:57:16.748378Z","end":"2025-11-23T08:57:16.914278Z","steps":["trace[282693566] 'process raft request' (duration: 165.731904ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:57:17.891916Z","caller":"traceutil/trace.go:172","msg":"trace[845827594] linearizableReadLoop","detail":"{readStateIndex:756; appliedIndex:756; }","duration":"162.635779ms","start":"2025-11-23T08:57:17.729260Z","end":"2025-11-23T08:57:17.891896Z","steps":["trace[845827594] 'read index received' (duration: 162.630099ms)","trace[845827594] 'applied index is now lower than readState.Index' (duration: 4.7µs)"],"step_count":2}
{"level":"warn","ts":"2025-11-23T08:57:17.892195Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"162.847621ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-23T08:57:17.892577Z","caller":"traceutil/trace.go:172","msg":"trace[1595377469] transaction","detail":"{read_only:false; response_revision:712; number_of_response:1; }","duration":"262.918033ms","start":"2025-11-23T08:57:17.629632Z","end":"2025-11-23T08:57:17.892550Z","steps":["trace[1595377469] 'process raft request' (duration: 262.820051ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:57:17.892238Z","caller":"traceutil/trace.go:172","msg":"trace[1998076635] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:711; }","duration":"162.976ms","start":"2025-11-23T08:57:17.729254Z","end":"2025-11-23T08:57:17.892230Z","steps":["trace[1998076635] 'agreement among raft nodes before linearized reading' (duration: 162.824778ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-23T08:57:17.894716Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"130.045976ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-23T08:57:17.894762Z","caller":"traceutil/trace.go:172","msg":"trace[1496763416] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:712; }","duration":"130.105624ms","start":"2025-11-23T08:57:17.764650Z","end":"2025-11-23T08:57:17.894756Z","steps":["trace[1496763416] 'agreement among raft nodes before linearized reading' (duration: 130.023549ms)"],"step_count":1}
==> kernel <==
08:58:09 up 1 min, 0 users, load average: 1.58, 0.55, 0.20
Linux no-preload-019660 6.6.95 #1 SMP PREEMPT_DYNAMIC Wed Nov 19 01:10:03 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2025.02"
==> kube-apiserver [51985d9c2b5e] <==
W1123 08:56:25.707408 1 logging.go:55] [core] [Channel #135 SubChannel #137]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.731493 1 logging.go:55] [core] [Channel #63 SubChannel #65]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.801488 1 logging.go:55] [core] [Channel #199 SubChannel #201]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.835630 1 logging.go:55] [core] [Channel #147 SubChannel #149]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.837271 1 logging.go:55] [core] [Channel #251 SubChannel #253]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.885167 1 logging.go:55] [core] [Channel #47 SubChannel #49]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.919480 1 logging.go:55] [core] [Channel #139 SubChannel #141]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.953337 1 logging.go:55] [core] [Channel #91 SubChannel #93]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.992450 1 logging.go:55] [core] [Channel #191 SubChannel #193]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.001050 1 logging.go:55] [core] [Channel #175 SubChannel #177]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.027017 1 logging.go:55] [core] [Channel #115 SubChannel #117]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.043092 1 logging.go:55] [core] [Channel #159 SubChannel #161]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.075821 1 logging.go:55] [core] [Channel #83 SubChannel #85]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.087192 1 logging.go:55] [core] [Channel #67 SubChannel #69]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.108299 1 logging.go:55] [core] [Channel #207 SubChannel #209]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.143125 1 logging.go:55] [core] [Channel #227 SubChannel #229]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.143847 1 logging.go:55] [core] [Channel #27 SubChannel #29]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.168146 1 logging.go:55] [core] [Channel #31 SubChannel #33]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.194296 1 logging.go:55] [core] [Channel #55 SubChannel #57]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.217089 1 logging.go:55] [core] [Channel #143 SubChannel #145]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.284415 1 logging.go:55] [core] [Channel #39 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.304057 1 logging.go:55] [core] [Channel #127 SubChannel #129]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.351096 1 logging.go:55] [core] [Channel #151 SubChannel #153]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.358315 1 logging.go:55] [core] [Channel #107 SubChannel #109]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.398513 1 logging.go:55] [core] [Channel #179 SubChannel #181]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
==> kube-apiserver [6929fc4394d1] <==
W1123 08:57:02.240589 1 handler_proxy.go:99] no RequestInfo found in the context
E1123 08:57:02.241169 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1123 08:57:02.242304 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
I1123 08:57:03.447397 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1123 08:57:03.566737 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I1123 08:57:03.633482 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1123 08:57:03.665173 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1123 08:57:04.456742 1 controller.go:667] quota admission added evaluator for: endpoints
I1123 08:57:04.822296 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1123 08:57:04.922886 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I1123 08:57:06.855489 1 controller.go:667] quota admission added evaluator for: namespaces
I1123 08:57:07.352680 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.100.252.132"}
I1123 08:57:07.386303 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.100.154.160"}
W1123 08:58:06.568683 1 handler_proxy.go:99] no RequestInfo found in the context
E1123 08:58:06.568889 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1123 08:58:06.569001 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
W1123 08:58:06.583847 1 handler_proxy.go:99] no RequestInfo found in the context
E1123 08:58:06.587393 1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
I1123 08:58:06.587452 1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
==> kube-controller-manager [1a4750ff7e8c] <==
I1123 08:57:04.478449 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I1123 08:57:04.488570 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1123 08:57:04.494373 1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
I1123 08:57:04.481772 1 shared_informer.go:356] "Caches are synced" controller="PV protection"
I1123 08:57:04.502443 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
I1123 08:57:04.502540 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I1123 08:57:04.506670 1 shared_informer.go:356] "Caches are synced" controller="ReplicaSet"
I1123 08:57:04.510647 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I1123 08:57:04.566367 1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
I1123 08:57:04.591835 1 shared_informer.go:349] "Waiting for caches to sync" controller="garbage collector"
I1123 08:57:04.750206 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1123 08:57:04.750262 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1123 08:57:04.750270 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1123 08:57:04.793332 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
E1123 08:57:07.066560 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.102507 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.134848 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.147364 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.152054 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.176406 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.177162 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.185205 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
I1123 08:57:14.479438 1 node_lifecycle_controller.go:1044] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
I1123 08:58:06.668391 1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
E1123 08:58:06.670861 1 resource_quota_controller.go:446] "Unhandled Error" err="unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: stale GroupVersion discovery: metrics.k8s.io/v1beta1" logger="UnhandledError"
==> kube-controller-manager [e0963762dabe] <==
I1123 08:55:27.305673 1 shared_informer.go:356] "Caches are synced" controller="VAC protection"
I1123 08:55:27.305856 1 shared_informer.go:356] "Caches are synced" controller="disruption"
I1123 08:55:27.305946 1 shared_informer.go:356] "Caches are synced" controller="namespace"
I1123 08:55:27.307430 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I1123 08:55:27.307491 1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
I1123 08:55:27.307769 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
I1123 08:55:27.308002 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1123 08:55:27.311526 1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
I1123 08:55:27.320061 1 shared_informer.go:356] "Caches are synced" controller="node"
I1123 08:55:27.320143 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I1123 08:55:27.320176 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I1123 08:55:27.320181 1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
I1123 08:55:27.320186 1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
I1123 08:55:27.323691 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I1123 08:55:27.332119 1 shared_informer.go:356] "Caches are synced" controller="taint"
I1123 08:55:27.332230 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I1123 08:55:27.332307 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="no-preload-019660"
I1123 08:55:27.332344 1 node_lifecycle_controller.go:1067] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal"
I1123 08:55:27.353034 1 shared_informer.go:356] "Caches are synced" controller="validatingadmissionpolicy-status"
I1123 08:55:27.353188 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1123 08:55:27.353234 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1123 08:55:27.353253 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1123 08:55:27.355630 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I1123 08:55:27.356002 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1123 08:55:27.484870 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="no-preload-019660" podCIDRs=["10.244.0.0/24"]
==> kube-proxy [4aea324009fd] <==
I1123 08:55:29.781436 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1123 08:55:29.882143 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1123 08:55:29.882176 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.50.40"]
E1123 08:55:29.882244 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1123 08:55:30.206875 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1123 08:55:30.209951 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1123 08:55:30.210016 1 server_linux.go:132] "Using iptables Proxier"
I1123 08:55:30.389394 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1123 08:55:30.398584 1 server.go:527] "Version info" version="v1.34.1"
I1123 08:55:30.411854 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 08:55:30.436371 1 config.go:106] "Starting endpoint slice config controller"
I1123 08:55:30.436400 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1123 08:55:30.436421 1 config.go:403] "Starting serviceCIDR config controller"
I1123 08:55:30.436428 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1123 08:55:30.441802 1 config.go:200] "Starting service config controller"
I1123 08:55:30.441827 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1123 08:55:30.456879 1 config.go:309] "Starting node config controller"
I1123 08:55:30.457052 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1123 08:55:30.457180 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1123 08:55:30.537976 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1123 08:55:30.542627 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1123 08:55:30.553889 1 shared_informer.go:356] "Caches are synced" controller="service config"
==> kube-proxy [8c0537e27a6f] <==
I1123 08:57:04.109885 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1123 08:57:04.212001 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1123 08:57:04.212377 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.50.40"]
E1123 08:57:04.212492 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1123 08:57:04.308881 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1123 08:57:04.309495 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1123 08:57:04.309923 1 server_linux.go:132] "Using iptables Proxier"
I1123 08:57:04.335219 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1123 08:57:04.338659 1 server.go:527] "Version info" version="v1.34.1"
I1123 08:57:04.339118 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 08:57:04.356711 1 config.go:200] "Starting service config controller"
I1123 08:57:04.358780 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1123 08:57:04.357281 1 config.go:403] "Starting serviceCIDR config controller"
I1123 08:57:04.360751 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1123 08:57:04.359340 1 config.go:309] "Starting node config controller"
I1123 08:57:04.361083 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1123 08:57:04.361217 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1123 08:57:04.357261 1 config.go:106] "Starting endpoint slice config controller"
I1123 08:57:04.361454 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1123 08:57:04.461112 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1123 08:57:04.461168 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1123 08:57:04.466392 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
==> kube-scheduler [266be5a40ca6] <==
I1123 08:56:59.176913 1 serving.go:386] Generated self-signed cert in-memory
W1123 08:57:01.157665 1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W1123 08:57:01.157869 1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W1123 08:57:01.157944 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
W1123 08:57:01.158050 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1123 08:57:01.217478 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.1"
I1123 08:57:01.217604 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 08:57:01.228584 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1123 08:57:01.229023 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1123 08:57:01.231067 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1123 08:57:01.231467 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1123 08:57:01.329575 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kube-scheduler [57bb06d26ab6] <==
E1123 08:55:19.477132 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1123 08:55:19.476999 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E1123 08:55:19.477074 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1123 08:55:19.478217 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1123 08:55:19.478832 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E1123 08:55:19.479554 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E1123 08:55:19.480141 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E1123 08:55:19.480165 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1123 08:55:19.480360 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1123 08:55:19.480372 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E1123 08:55:19.480530 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1123 08:55:19.480623 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E1123 08:55:19.481197 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1123 08:55:19.482165 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1123 08:55:20.289908 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1123 08:55:20.337370 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1123 08:55:20.366302 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1123 08:55:20.425798 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1123 08:55:20.483335 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
E1123 08:55:20.494282 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
I1123 08:55:23.055993 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1123 08:56:16.316839 1 secure_serving.go:259] Stopped listening on 127.0.0.1:10259
I1123 08:56:16.317595 1 server.go:263] "[graceful-termination] secure server has stopped listening"
I1123 08:56:16.317742 1 server.go:265] "[graceful-termination] secure server is exiting"
E1123 08:56:16.317790 1 run.go:72] "command failed" err="finished without leader elect"
==> kubelet <==
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220241 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-k8s-certs\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220309 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-flexvolume-dir\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220345 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-kubeconfig\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220366 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-usr-share-ca-certificates\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220392 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-ca-certs\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220412 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-usr-share-ca-certificates\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220431 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-ca-certs\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220451 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-k8s-certs\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220473 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/0bd61e39ef27cab83cc049d81d34254c-kubeconfig\") pod \"kube-scheduler-no-preload-019660\" (UID: \"0bd61e39ef27cab83cc049d81d34254c\") " pod="kube-system/kube-scheduler-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.223516 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/4765da838683051a5b8aa163156bdc40-etcd-certs\") pod \"etcd-no-preload-019660\" (UID: \"4765da838683051a5b8aa163156bdc40\") " pod="kube-system/etcd-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.224048 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/4765da838683051a5b8aa163156bdc40-etcd-data\") pod \"etcd-no-preload-019660\" (UID: \"4765da838683051a5b8aa163156bdc40\") " pod="kube-system/etcd-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.281626 4357 apiserver.go:52] "Watching apiserver"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.354823 4357 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428002 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/92a336c6-9d96-4484-8903-9542086c298e-tmp\") pod \"storage-provisioner\" (UID: \"92a336c6-9d96-4484-8903-9542086c298e\") " pod="kube-system/storage-provisioner"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428072 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/fb442967-1590-4196-a0b8-1ed0320182cd-xtables-lock\") pod \"kube-proxy-wlb9w\" (UID: \"fb442967-1590-4196-a0b8-1ed0320182cd\") " pod="kube-system/kube-proxy-wlb9w"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428146 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fb442967-1590-4196-a0b8-1ed0320182cd-lib-modules\") pod \"kube-proxy-wlb9w\" (UID: \"fb442967-1590-4196-a0b8-1ed0320182cd\") " pod="kube-system/kube-proxy-wlb9w"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.612741 4357 scope.go:117] "RemoveContainer" containerID="1f0a2f0aefa9b826288b8b721a751f41c880f8daa0983c581ae8b039871db1a1"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.874748 4357 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.877286 4357 kuberuntime_image.go:43] "Failed to pull image" err="Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.878430 4357 kuberuntime_manager.go:1449] "Unhandled Error" err="container dashboard-metrics-scraper start failed in pod dashboard-metrics-scraper-6ffb444bf9-4965t_kubernetes-dashboard(d4a9e601-4647-40d6-a5d8-db1e8e067281): ErrImagePull: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" logger="UnhandledError"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.878855 4357 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ErrImagePull: \"Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9-4965t" podUID="d4a9e601-4647-40d6-a5d8-db1e8e067281"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.918928 4357 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.919810 4357 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.921110 4357 kuberuntime_manager.go:1449] "Unhandled Error" err="container metrics-server start failed in pod metrics-server-746fcd58dc-tg8q5_kube-system(fb0df7df-58f1-4b52-8193-e19d66dd95bf): ErrImagePull: Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" logger="UnhandledError"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.921171 4357 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"metrics-server\" with ErrImagePull: \"Error response from daemon: Get \\\"https://fake.domain/v2/\\\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host\"" pod="kube-system/metrics-server-746fcd58dc-tg8q5" podUID="fb0df7df-58f1-4b52-8193-e19d66dd95bf"
==> kubernetes-dashboard [57ebcdb97431] <==
2025/11/23 08:57:20 Starting overwatch
2025/11/23 08:57:20 Using namespace: kubernetes-dashboard
2025/11/23 08:57:20 Using in-cluster config to connect to apiserver
2025/11/23 08:57:20 Using secret token for csrf signing
2025/11/23 08:57:20 Initializing csrf token from kubernetes-dashboard-csrf secret
2025/11/23 08:57:20 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
2025/11/23 08:57:20 Successful initial request to the apiserver, version: v1.34.1
2025/11/23 08:57:20 Generating JWE encryption key
2025/11/23 08:57:20 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
2025/11/23 08:57:20 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
2025/11/23 08:57:21 Initializing JWE encryption key from synchronized object
2025/11/23 08:57:21 Creating in-cluster Sidecar client
2025/11/23 08:57:21 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2025/11/23 08:57:21 Serving insecurely on HTTP port: 9090
2025/11/23 08:58:06 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
==> storage-provisioner [1f0a2f0aefa9] <==
I1123 08:57:03.436717 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F1123 08:57:33.518183 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
==> storage-provisioner [371de4a46890] <==
I1123 08:58:09.007550 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1123 08:58:09.042381 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1123 08:58:09.044488 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
W1123 08:58:09.057366 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
-- /stdout --
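A note on the kube-scheduler errors in the dump above: the "Failed to watch ... is forbidden" lines are all clustered at 08:55:19-08:55:20, immediately after the apiserver came back, and they stop once "Caches are synced" is logged at 08:55:23, which points to transient startup ordering rather than missing RBAC. A minimal way to double-check afterwards, assuming impersonation is permitted for the test user, would be something like:
kubectl --context no-preload-019660 auth can-i list persistentvolumes --as=system:kube-scheduler
kubectl --context no-preload-019660 auth can-i watch csistoragecapacities.storage.k8s.io --as=system:kube-scheduler
Two "yes" answers would confirm the scheduler's permissions are intact and the earlier errors were only a restart race.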
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-019660 -n no-preload-019660
helpers_test.go:269: (dbg) Run: kubectl --context no-preload-019660 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t
helpers_test.go:282: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t: exit status 1 (79.631105ms)
** stderr **
Error from server (NotFound): pods "metrics-server-746fcd58dc-tg8q5" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-6ffb444bf9-4965t" not found
** /stderr **
helpers_test.go:287: kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t: exit status 1
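The NotFound errors above are most likely a namespace mismatch rather than the pods being gone: the pod listing at helpers_test.go:269 ran with -A, but the describe at helpers_test.go:285 passes no namespace, so it only searches "default", while the kubelet log places these pods in kube-system and kubernetes-dashboard. A sketch of the per-namespace equivalent, using the pod names and namespaces reported above:
kubectl --context no-preload-019660 -n kube-system describe pod metrics-server-746fcd58dc-tg8q5
kubectl --context no-preload-019660 -n kubernetes-dashboard describe pod dashboard-metrics-scraper-6ffb444bf9-4965t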
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-019660 -n no-preload-019660
helpers_test.go:252: <<< TestStartStop/group/no-preload/serial/Pause FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p no-preload-019660 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p no-preload-019660 logs -n 25: (1.603322662s)
helpers_test.go:260: TestStartStop/group/no-preload/serial/Pause logs:
-- stdout --
==> Audit <==
┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ stop │ -p no-preload-019660 --alsologtostderr -v=3 │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ addons │ enable dashboard -p no-preload-019660 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ start │ -p no-preload-019660 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=kvm2 --kubernetes-version=v1.34.1 │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:57 UTC │
│ addons │ enable metrics-server -p embed-certs-059363 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ stop │ -p embed-certs-059363 --alsologtostderr -v=3 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ addons │ enable dashboard -p embed-certs-059363 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
│ start │ -p embed-certs-059363 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=kvm2 --kubernetes-version=v1.34.1 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:57 UTC │
│ addons │ enable metrics-server -p default-k8s-diff-port-925051 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ stop │ -p default-k8s-diff-port-925051 --alsologtostderr -v=3 │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ image │ old-k8s-version-896471 image list --format=json │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ pause │ -p old-k8s-version-896471 --alsologtostderr -v=1 │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ unpause │ -p old-k8s-version-896471 --alsologtostderr -v=1 │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ delete │ -p old-k8s-version-896471 │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ delete │ -p old-k8s-version-896471 │ old-k8s-version-896471 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ start │ -p newest-cni-078196 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=kvm2 --kubernetes-version=v1.34.1 │ newest-cni-078196 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ │
│ addons │ enable dashboard -p default-k8s-diff-port-925051 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ start │ -p default-k8s-diff-port-925051 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=kvm2 --kubernetes-version=v1.34.1 │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ │
│ image │ no-preload-019660 image list --format=json │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ pause │ -p no-preload-019660 --alsologtostderr -v=1 │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
│ image │ embed-certs-059363 image list --format=json │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
│ pause │ -p embed-certs-059363 --alsologtostderr -v=1 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
│ unpause │ -p no-preload-019660 --alsologtostderr -v=1 │ no-preload-019660 │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
│ unpause │ -p embed-certs-059363 --alsologtostderr -v=1 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
│ delete │ -p embed-certs-059363 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
│ delete │ -p embed-certs-059363 │ embed-certs-059363 │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/23 08:57:16
Running on machine: ubuntu-20-agent-3
Binary: Built with gc go1.25.3 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1123 08:57:16.853497 62480 out.go:360] Setting OutFile to fd 1 ...
I1123 08:57:16.853743 62480 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:57:16.853753 62480 out.go:374] Setting ErrFile to fd 2...
I1123 08:57:16.853757 62480 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:57:16.854434 62480 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
I1123 08:57:16.855203 62480 out.go:368] Setting JSON to false
I1123 08:57:16.856605 62480 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":5986,"bootTime":1763882251,"procs":197,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1123 08:57:16.856696 62480 start.go:143] virtualization: kvm guest
I1123 08:57:16.935723 62480 out.go:179] * [default-k8s-diff-port-925051] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1123 08:57:16.941914 62480 out.go:179] - MINIKUBE_LOCATION=21966
I1123 08:57:16.941916 62480 notify.go:221] Checking for updates...
I1123 08:57:16.943817 62480 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1123 08:57:16.945573 62480 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
I1123 08:57:16.946745 62480 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
I1123 08:57:16.947938 62480 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1123 08:57:16.949027 62480 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1123 08:57:16.950511 62480 config.go:182] Loaded profile config "default-k8s-diff-port-925051": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:57:16.951037 62480 driver.go:422] Setting default libvirt URI to qemu:///system
I1123 08:57:16.994324 62480 out.go:179] * Using the kvm2 driver based on existing profile
I1123 08:57:16.995670 62480 start.go:309] selected driver: kvm2
I1123 08:57:16.995691 62480 start.go:927] validating driver "kvm2" against &{Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:57:16.995851 62480 start.go:938] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1123 08:57:16.997354 62480 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:57:16.997396 62480 cni.go:84] Creating CNI manager for ""
I1123 08:57:16.997466 62480 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:57:16.997521 62480 start.go:353] cluster config:
{Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:57:16.997662 62480 iso.go:125] acquiring lock: {Name:mk9cdb644d601a15f26caa6d527f7a63e06eb691 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:57:16.999287 62480 out.go:179] * Starting "default-k8s-diff-port-925051" primary control-plane node in "default-k8s-diff-port-925051" cluster
I1123 08:57:16.538965 62034 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:57:16.543216 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.543908 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.543934 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.544164 62034 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/config.json ...
I1123 08:57:16.544418 62034 machine.go:94] provisionDockerMachine start ...
I1123 08:57:16.547123 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.547583 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.547608 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.547766 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:16.547963 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:16.547972 62034 main.go:143] libmachine: About to run SSH command:
hostname
I1123 08:57:16.673771 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1123 08:57:16.673806 62034 buildroot.go:166] provisioning hostname "embed-certs-059363"
I1123 08:57:16.677167 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.677679 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.677711 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.677931 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:16.678192 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:16.678214 62034 main.go:143] libmachine: About to run SSH command:
sudo hostname embed-certs-059363 && echo "embed-certs-059363" | sudo tee /etc/hostname
I1123 08:57:16.832499 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: embed-certs-059363
I1123 08:57:16.837251 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.837813 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.837855 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.838109 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:16.838438 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:16.838465 62034 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sembed-certs-059363' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 embed-certs-059363/g' /etc/hosts;
else
echo '127.0.1.1 embed-certs-059363' | sudo tee -a /etc/hosts;
fi
fi
I1123 08:57:16.972318 62034 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:57:16.972350 62034 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
I1123 08:57:16.972374 62034 buildroot.go:174] setting up certificates
I1123 08:57:16.972395 62034 provision.go:84] configureAuth start
I1123 08:57:16.976994 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.977623 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.977662 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.980665 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.981134 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:16.981158 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:16.981351 62034 provision.go:143] copyHostCerts
I1123 08:57:16.981431 62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
I1123 08:57:16.981446 62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
I1123 08:57:16.981523 62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
I1123 08:57:16.981635 62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
I1123 08:57:16.981646 62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
I1123 08:57:16.981690 62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
I1123 08:57:16.981769 62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
I1123 08:57:16.981779 62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
I1123 08:57:16.981817 62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
I1123 08:57:16.981897 62034 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.embed-certs-059363 san=[127.0.0.1 192.168.72.170 embed-certs-059363 localhost minikube]
I1123 08:57:17.112794 62034 provision.go:177] copyRemoteCerts
I1123 08:57:17.112848 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 08:57:17.115853 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.116282 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:17.116308 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.116478 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:17.223809 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1123 08:57:17.266771 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1123 08:57:17.305976 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
I1123 08:57:17.336820 62034 provision.go:87] duration metric: took 364.408049ms to configureAuth
I1123 08:57:17.336863 62034 buildroot.go:189] setting minikube options for container-runtime
I1123 08:57:17.337080 62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:57:17.339671 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.340090 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:17.340112 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.340318 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:17.340623 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:17.340643 62034 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1123 08:57:17.463677 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1123 08:57:17.463707 62034 buildroot.go:70] root file system type: tmpfs
I1123 08:57:17.463928 62034 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1123 08:57:17.467227 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.467655 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:17.467686 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.467940 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:17.468174 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:17.468268 62034 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1123 08:57:17.602870 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1123 08:57:17.606541 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.607111 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:17.607152 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:17.607427 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:17.607698 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:17.607716 62034 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1123 08:57:19.186051 62386 start.go:364] duration metric: took 9.989286317s to acquireMachinesLock for "newest-cni-078196"
I1123 08:57:19.186120 62386 start.go:93] Provisioning new machine with config: &{Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1123 08:57:19.186215 62386 start.go:125] createHost starting for "" (driver="kvm2")
W1123 08:57:15.950255 61684 pod_ready.go:104] pod "coredns-66bc5c9577-nj6pk" is not "Ready", error: <nil>
W1123 08:57:17.951890 61684 pod_ready.go:104] pod "coredns-66bc5c9577-nj6pk" is not "Ready", error: <nil>
I1123 08:57:19.962419 61684 pod_ready.go:94] pod "coredns-66bc5c9577-nj6pk" is "Ready"
I1123 08:57:19.962449 61684 pod_ready.go:86] duration metric: took 8.021055049s for pod "coredns-66bc5c9577-nj6pk" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:19.967799 61684 pod_ready.go:83] waiting for pod "etcd-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:19.977812 61684 pod_ready.go:94] pod "etcd-no-preload-019660" is "Ready"
I1123 08:57:19.977834 61684 pod_ready.go:86] duration metric: took 10.013782ms for pod "etcd-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:19.982683 61684 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:19.992798 61684 pod_ready.go:94] pod "kube-apiserver-no-preload-019660" is "Ready"
I1123 08:57:19.992831 61684 pod_ready.go:86] duration metric: took 10.122708ms for pod "kube-apiserver-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:19.997939 61684 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:20.145706 61684 pod_ready.go:94] pod "kube-controller-manager-no-preload-019660" is "Ready"
I1123 08:57:20.145742 61684 pod_ready.go:86] duration metric: took 147.777309ms for pod "kube-controller-manager-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:20.355205 61684 pod_ready.go:83] waiting for pod "kube-proxy-wlb9w" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:20.756189 61684 pod_ready.go:94] pod "kube-proxy-wlb9w" is "Ready"
I1123 08:57:20.756259 61684 pod_ready.go:86] duration metric: took 400.985169ms for pod "kube-proxy-wlb9w" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:20.947647 61684 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:21.350509 61684 pod_ready.go:94] pod "kube-scheduler-no-preload-019660" is "Ready"
I1123 08:57:21.350539 61684 pod_ready.go:86] duration metric: took 402.864201ms for pod "kube-scheduler-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:21.350552 61684 pod_ready.go:40] duration metric: took 9.416731421s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:57:21.405369 61684 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1123 08:57:21.409795 61684 out.go:179] * Done! kubectl is now configured to use "no-preload-019660" cluster and "default" namespace by default
I1123 08:57:17.000521 62480 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1123 08:57:17.000560 62480 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4
I1123 08:57:17.000571 62480 cache.go:65] Caching tarball of preloaded images
I1123 08:57:17.000667 62480 preload.go:238] Found /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1123 08:57:17.000683 62480 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on docker
I1123 08:57:17.000806 62480 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/config.json ...
I1123 08:57:17.001089 62480 start.go:360] acquireMachinesLock for default-k8s-diff-port-925051: {Name:mka7dedac533b164a995f5c19cff4f68d827bd22 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1123 08:57:18.895461 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1123 08:57:18.895495 62034 machine.go:97] duration metric: took 2.351059819s to provisionDockerMachine
I1123 08:57:18.895519 62034 start.go:293] postStartSetup for "embed-certs-059363" (driver="kvm2")
I1123 08:57:18.895547 62034 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 08:57:18.895631 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 08:57:18.899037 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:18.899549 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:18.899585 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:18.899747 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:18.995822 62034 ssh_runner.go:195] Run: cat /etc/os-release
I1123 08:57:19.001215 62034 info.go:137] Remote host: Buildroot 2025.02
I1123 08:57:19.001261 62034 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
I1123 08:57:19.001335 62034 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
I1123 08:57:19.001434 62034 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
I1123 08:57:19.001551 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 08:57:19.015155 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:57:19.054248 62034 start.go:296] duration metric: took 158.692501ms for postStartSetup
I1123 08:57:19.054294 62034 fix.go:56] duration metric: took 20.246777293s for fixHost
I1123 08:57:19.058146 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.058727 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:19.058771 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.058998 62034 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:19.059317 62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.72.170 22 <nil> <nil>}
I1123 08:57:19.059336 62034 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1123 08:57:19.185896 62034 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888239.115597688
I1123 08:57:19.185919 62034 fix.go:216] guest clock: 1763888239.115597688
I1123 08:57:19.185926 62034 fix.go:229] Guest: 2025-11-23 08:57:19.115597688 +0000 UTC Remote: 2025-11-23 08:57:19.054315183 +0000 UTC m=+20.376918396 (delta=61.282505ms)
I1123 08:57:19.185941 62034 fix.go:200] guest clock delta is within tolerance: 61.282505ms
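[Editor's note: the fix.go lines above run `date +%s.%N` on the guest over SSH, parse the fractional epoch timestamp, and compare it with the host clock; only if the delta exceeds a tolerance would the guest clock be reset. A rough sketch of that check, with a sample value taken from the log and an assumed tolerance, not minikube's exact one.]

```go
// clockdelta_sketch.go - illustrative guest vs. host clock-skew check.
package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
	"time"
)

// parseEpoch converts the output of `date +%s.%N` (e.g. "1763888239.115597688")
// into a time.Time. Float parsing loses a little nanosecond precision, which is
// fine for a skew check.
func parseEpoch(s string) (time.Time, error) {
	f, err := strconv.ParseFloat(strings.TrimSpace(s), 64)
	if err != nil {
		return time.Time{}, err
	}
	sec := int64(f)
	nsec := int64((f - float64(sec)) * 1e9)
	return time.Unix(sec, nsec), nil
}

func main() {
	guest, err := parseEpoch("1763888239.115597688") // sample value from the log above
	if err != nil {
		panic(err)
	}
	delta := time.Since(guest)
	const tolerance = 2 * time.Second // assumed tolerance, not minikube's exact value
	if math.Abs(float64(delta)) <= float64(tolerance) {
		fmt.Printf("guest clock delta %v is within tolerance\n", delta)
	} else {
		fmt.Printf("guest clock delta %v exceeds tolerance, would reset guest clock\n", delta)
	}
}
```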
I1123 08:57:19.185962 62034 start.go:83] releasing machines lock for "embed-certs-059363", held for 20.37844631s
I1123 08:57:19.189984 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.190596 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:19.190635 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.191288 62034 ssh_runner.go:195] Run: cat /version.json
I1123 08:57:19.191295 62034 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 08:57:19.195221 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.195642 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.195676 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:19.195699 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.195883 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:19.196195 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:19.196264 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:19.196563 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:19.315903 62034 ssh_runner.go:195] Run: systemctl --version
I1123 08:57:19.323178 62034 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 08:57:19.333159 62034 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 08:57:19.333365 62034 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 08:57:19.356324 62034 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1123 08:57:19.356355 62034 start.go:496] detecting cgroup driver to use...
I1123 08:57:19.356469 62034 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:57:19.385750 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1123 08:57:19.400434 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 08:57:19.414104 62034 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1123 08:57:19.414182 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1123 08:57:19.433788 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:57:19.449538 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 08:57:19.464107 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:57:19.481469 62034 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 08:57:19.496533 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 08:57:19.511385 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 08:57:19.525634 62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1123 08:57:19.544298 62034 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 08:57:19.560120 62034 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1123 08:57:19.560179 62034 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1123 08:57:19.576631 62034 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 08:57:19.592833 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:19.763221 62034 ssh_runner.go:195] Run: sudo systemctl restart containerd
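[Editor's note: the sequence above shows the fallback when `sysctl net.bridge.bridge-nf-call-iptables` cannot find the key: load the br_netfilter module, then make sure IPv4 forwarding is on. A minimal sketch of that fallback using os/exec; commands mirror the log, error handling is simplified.]

```go
// netfilter_sketch.go - illustrative version of the "sysctl, then modprobe" fallback above.
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// If the bridge netfilter key is missing, the br_netfilter module is not loaded yet.
	if err := exec.Command("sudo", "sysctl", "net.bridge.bridge-nf-call-iptables").Run(); err != nil {
		fmt.Fprintln(os.Stderr, "bridge netfilter not available, loading br_netfilter:", err)
		if err := exec.Command("sudo", "modprobe", "br_netfilter").Run(); err != nil {
			fmt.Fprintln(os.Stderr, "modprobe br_netfilter failed:", err)
		}
	}
	// Kubernetes networking also needs IPv4 forwarding enabled.
	if err := exec.Command("sudo", "sh", "-c", "echo 1 > /proc/sys/net/ipv4/ip_forward").Run(); err != nil {
		fmt.Fprintln(os.Stderr, "enabling ip_forward failed:", err)
	}
}
```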
I1123 08:57:19.811223 62034 start.go:496] detecting cgroup driver to use...
I1123 08:57:19.811335 62034 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1123 08:57:19.833532 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:57:19.859627 62034 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1123 08:57:19.884432 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:57:19.903805 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:57:19.921275 62034 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 08:57:19.960990 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:57:19.980317 62034 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:57:20.008661 62034 ssh_runner.go:195] Run: which cri-dockerd
I1123 08:57:20.013631 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1123 08:57:20.029302 62034 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1123 08:57:20.057103 62034 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1123 08:57:20.252891 62034 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1123 08:57:20.490326 62034 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1123 08:57:20.490458 62034 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1123 08:57:20.526773 62034 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1123 08:57:20.548985 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:20.740694 62034 ssh_runner.go:195] Run: sudo systemctl restart docker
I1123 08:57:21.481342 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 08:57:21.507341 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1123 08:57:21.530703 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:21.555618 62034 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1123 08:57:21.736442 62034 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1123 08:57:21.910308 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:22.084793 62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1123 08:57:22.133988 62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1123 08:57:22.150466 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:22.310923 62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1123 08:57:22.333687 62034 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1123 08:57:22.355809 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:22.373321 62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1123 08:57:22.392686 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:22.568456 62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1123 08:57:22.588895 62034 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1123 08:57:22.604152 62034 retry.go:31] will retry after 1.30731135s: cri-docker.service not running
I1123 08:57:19.188404 62386 out.go:252] * Creating kvm2 VM (CPUs=2, Memory=3072MB, Disk=20000MB) ...
I1123 08:57:19.188687 62386 start.go:159] libmachine.API.Create for "newest-cni-078196" (driver="kvm2")
I1123 08:57:19.188735 62386 client.go:173] LocalClient.Create starting
I1123 08:57:19.188852 62386 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem
I1123 08:57:19.188919 62386 main.go:143] libmachine: Decoding PEM data...
I1123 08:57:19.188950 62386 main.go:143] libmachine: Parsing certificate...
I1123 08:57:19.189026 62386 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem
I1123 08:57:19.189059 62386 main.go:143] libmachine: Decoding PEM data...
I1123 08:57:19.189080 62386 main.go:143] libmachine: Parsing certificate...
I1123 08:57:19.189577 62386 main.go:143] libmachine: creating domain...
I1123 08:57:19.189595 62386 main.go:143] libmachine: creating network...
I1123 08:57:19.191331 62386 main.go:143] libmachine: found existing default network
I1123 08:57:19.191879 62386 main.go:143] libmachine: <network connections='3'>
<name>default</name>
<uuid>c61344c2-dba2-46dd-a21a-34776d235985</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr0' stp='on' delay='0'/>
<mac address='52:54:00:10:a2:1d'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.122.2' end='192.168.122.254'/>
</dhcp>
</ip>
</network>
I1123 08:57:19.193313 62386 network.go:206] using free private subnet 192.168.39.0/24: &{IP:192.168.39.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.39.0/24 Gateway:192.168.39.1 ClientMin:192.168.39.2 ClientMax:192.168.39.254 Broadcast:192.168.39.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e04740}
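[Editor's note: the network.go line above picks the first private /24 that does not collide with anything already on the host. A simplified sketch of that selection using only the standard library; it only checks local interface addresses, not existing libvirt leases, and the candidate step size is illustrative.]

```go
// subnet_sketch.go - simplified version of picking a free 192.168.x.0/24 subnet.
package main

import (
	"fmt"
	"net"
)

// freeSubnet returns the first candidate 192.168.x.0/24 that does not overlap
// an address already configured on a local interface.
func freeSubnet() (*net.IPNet, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return nil, err
	}
	for x := 39; x < 255; x += 11 { // step size chosen for illustration
		_, candidate, _ := net.ParseCIDR(fmt.Sprintf("192.168.%d.0/24", x))
		inUse := false
		for _, a := range addrs {
			if ipnet, ok := a.(*net.IPNet); ok && candidate.Contains(ipnet.IP) {
				inUse = true
				break
			}
		}
		if !inUse {
			return candidate, nil
		}
	}
	return nil, fmt.Errorf("no free private /24 found")
}

func main() {
	n, err := freeSubnet()
	if err != nil {
		panic(err)
	}
	fmt.Println("using free private subnet", n)
}
```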
I1123 08:57:19.193434 62386 main.go:143] libmachine: defining private network:
<network>
<name>mk-newest-cni-078196</name>
<dns enable='no'/>
<ip address='192.168.39.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.39.2' end='192.168.39.253'/>
</dhcp>
</ip>
</network>
I1123 08:57:19.200866 62386 main.go:143] libmachine: creating private network mk-newest-cni-078196 192.168.39.0/24...
I1123 08:57:19.291873 62386 main.go:143] libmachine: private network mk-newest-cni-078196 192.168.39.0/24 created
I1123 08:57:19.292226 62386 main.go:143] libmachine: <network>
<name>mk-newest-cni-078196</name>
<uuid>d7bc9eb0-778c-4b77-a392-72f78dc9558b</uuid>
<bridge name='virbr1' stp='on' delay='0'/>
<mac address='52:54:00:20:cc:6a'/>
<dns enable='no'/>
<ip address='192.168.39.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.39.2' end='192.168.39.253'/>
</dhcp>
</ip>
</network>
I1123 08:57:19.292287 62386 main.go:143] libmachine: setting up store path in /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 ...
I1123 08:57:19.292318 62386 main.go:143] libmachine: building disk image from file:///home/jenkins/minikube-integration/21966-18241/.minikube/cache/iso/amd64/minikube-v1.37.0-1763503576-21924-amd64.iso
I1123 08:57:19.292332 62386 common.go:152] Making disk image using store path: /home/jenkins/minikube-integration/21966-18241/.minikube
I1123 08:57:19.292416 62386 main.go:143] libmachine: Downloading /home/jenkins/minikube-integration/21966-18241/.minikube/cache/boot2docker.iso from file:///home/jenkins/minikube-integration/21966-18241/.minikube/cache/iso/amd64/minikube-v1.37.0-1763503576-21924-amd64.iso...
I1123 08:57:19.540811 62386 common.go:159] Creating ssh key: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa...
I1123 08:57:19.628322 62386 common.go:165] Creating raw disk image: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk...
I1123 08:57:19.628370 62386 main.go:143] libmachine: Writing magic tar header
I1123 08:57:19.628409 62386 main.go:143] libmachine: Writing SSH key tar header
I1123 08:57:19.628532 62386 common.go:179] Fixing permissions on /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 ...
I1123 08:57:19.628646 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196
I1123 08:57:19.628680 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 (perms=drwx------)
I1123 08:57:19.628696 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube/machines
I1123 08:57:19.628716 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube/machines (perms=drwxr-xr-x)
I1123 08:57:19.628737 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube
I1123 08:57:19.628753 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube (perms=drwxr-xr-x)
I1123 08:57:19.628766 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241
I1123 08:57:19.628783 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241 (perms=drwxrwxr-x)
I1123 08:57:19.628796 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration
I1123 08:57:19.628812 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration (perms=drwxrwxr-x)
I1123 08:57:19.628825 62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins
I1123 08:57:19.628845 62386 main.go:143] libmachine: setting executable bit set on /home/jenkins (perms=drwxr-xr-x)
I1123 08:57:19.628862 62386 main.go:143] libmachine: checking permissions on dir: /home
I1123 08:57:19.628874 62386 main.go:143] libmachine: skipping /home - not owner
I1123 08:57:19.628886 62386 main.go:143] libmachine: defining domain...
I1123 08:57:19.630619 62386 main.go:143] libmachine: defining domain using XML:
<domain type='kvm'>
<name>newest-cni-078196</name>
<memory unit='MiB'>3072</memory>
<vcpu>2</vcpu>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough'>
</cpu>
<os>
<type>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<devices>
<disk type='file' device='cdrom'>
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' cache='default' io='threads' />
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk'/>
<target dev='hda' bus='virtio'/>
</disk>
<interface type='network'>
<source network='mk-newest-cni-078196'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<source network='default'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
</rng>
</devices>
</domain>
I1123 08:57:19.637651 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:7a:a4:6b in network default
I1123 08:57:19.638554 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:19.638580 62386 main.go:143] libmachine: starting domain...
I1123 08:57:19.638587 62386 main.go:143] libmachine: ensuring networks are active...
I1123 08:57:19.639501 62386 main.go:143] libmachine: Ensuring network default is active
I1123 08:57:19.640013 62386 main.go:143] libmachine: Ensuring network mk-newest-cni-078196 is active
I1123 08:57:19.640748 62386 main.go:143] libmachine: getting domain XML...
I1123 08:57:19.642270 62386 main.go:143] libmachine: starting domain XML:
<domain type='kvm'>
<name>newest-cni-078196</name>
<uuid>67bf4217-d2fd-4841-a93c-e1581f4c5592</uuid>
<memory unit='KiB'>3145728</memory>
<currentMemory unit='KiB'>3145728</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='2'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' io='threads'/>
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk'/>
<target dev='hda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</controller>
<interface type='network'>
<mac address='52:54:00:d7:c1:0d'/>
<source network='mk-newest-cni-078196'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:7a:a4:6b'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<audio id='1' type='none'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</rng>
</devices>
</domain>
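[Editor's note: XML like the block above is handed to libvirt to define and boot the VM. A sketch of that step with the libvirt.org/go/libvirt bindings (cgo, needs libvirt installed); the file name is hypothetical and error handling is trimmed. minikube's kvm2 driver does essentially this inside its own wrapper code.]

```go
// define_domain_sketch.go - defining and starting a domain from XML via libvirt.
package main

import (
	"fmt"
	"os"

	libvirt "libvirt.org/go/libvirt"
)

func main() {
	conn, err := libvirt.NewConnect("qemu:///system")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	xml, err := os.ReadFile("newest-cni-078196.xml") // hypothetical file holding the domain XML
	if err != nil {
		panic(err)
	}

	dom, err := conn.DomainDefineXML(string(xml)) // register the domain with libvirt
	if err != nil {
		panic(err)
	}
	defer dom.Free()

	if err := dom.Create(); err != nil { // boot the VM
		panic(err)
	}
	fmt.Println("domain defined and started")
}
```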
I1123 08:57:21.239037 62386 main.go:143] libmachine: waiting for domain to start...
I1123 08:57:21.240876 62386 main.go:143] libmachine: domain is now running
I1123 08:57:21.240900 62386 main.go:143] libmachine: waiting for IP...
I1123 08:57:21.241736 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:21.242592 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:21.242611 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:21.243307 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:21.243346 62386 retry.go:31] will retry after 218.272628ms: waiting for domain to come up
I1123 08:57:21.462945 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:21.463818 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:21.463835 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:21.464322 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:21.464353 62386 retry.go:31] will retry after 354.758102ms: waiting for domain to come up
I1123 08:57:21.820932 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:21.821871 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:21.821891 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:21.822290 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:21.822322 62386 retry.go:31] will retry after 480.079581ms: waiting for domain to come up
I1123 08:57:22.304134 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:22.305030 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:22.305053 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:22.305471 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:22.305501 62386 retry.go:31] will retry after 430.762091ms: waiting for domain to come up
I1123 08:57:22.738137 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:22.739007 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:22.739022 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:22.739466 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:22.739499 62386 retry.go:31] will retry after 752.582052ms: waiting for domain to come up
I1123 08:57:23.493414 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:23.494256 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:23.494271 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:23.494669 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:23.494696 62386 retry.go:31] will retry after 765.228537ms: waiting for domain to come up
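[Editor's note: the retry.go lines above wait for the new domain's DHCP lease by re-checking with a randomized, growing delay. A minimal sketch of that retry pattern; the starting delay, growth factor, and function names are illustrative, not retry.go's actual implementation.]

```go
// retrybackoff_sketch.go - the retry pattern used while waiting for the domain's IP.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryWithBackoff runs fn until it succeeds or maxWait elapses, sleeping a
// randomized, growing interval between attempts.
func retryWithBackoff(maxWait time.Duration, fn func() error) error {
	start := time.Now()
	base := 200 * time.Millisecond // illustrative starting delay
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Since(start) > maxWait {
			return fmt.Errorf("gave up after %s: %w", maxWait, err)
		}
		delay := base + time.Duration(rand.Int63n(int64(base)))
		fmt.Printf("will retry after %v: %v\n", delay, err)
		time.Sleep(delay)
		base *= 2 // grow the delay between attempts
	}
}

func main() {
	attempts := 0
	_ = retryWithBackoff(10*time.Second, func() error {
		attempts++
		if attempts < 4 {
			return fmt.Errorf("waiting for domain to come up")
		}
		return nil
	})
}
```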
I1123 08:57:23.912604 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:23.930659 62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1123 08:57:23.946465 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:24.099133 62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1123 08:57:24.217974 62034 retry.go:31] will retry after 1.350292483s: cri-docker.service not running
I1123 08:57:25.569520 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:25.588082 62034 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1123 08:57:25.588166 62034 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1123 08:57:25.595521 62034 start.go:564] Will wait 60s for crictl version
I1123 08:57:25.595597 62034 ssh_runner.go:195] Run: which crictl
I1123 08:57:25.600903 62034 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1123 08:57:25.642159 62034 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1123 08:57:25.642260 62034 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:57:25.678324 62034 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:57:25.708968 62034 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1123 08:57:25.712357 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:25.712811 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:25.712861 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:25.713088 62034 ssh_runner.go:195] Run: grep 192.168.72.1 host.minikube.internal$ /etc/hosts
I1123 08:57:25.718506 62034 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.72.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:57:25.737282 62034 kubeadm.go:884] updating cluster {Name:embed-certs-059363 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1123 08:57:25.737446 62034 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1123 08:57:25.737523 62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:57:25.759347 62034 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1123 08:57:25.759372 62034 docker.go:621] Images already preloaded, skipping extraction
I1123 08:57:25.759440 62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:57:25.784761 62034 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1123 08:57:25.784786 62034 cache_images.go:86] Images are preloaded, skipping loading
I1123 08:57:25.784796 62034 kubeadm.go:935] updating node { 192.168.72.170 8443 v1.34.1 docker true true} ...
I1123 08:57:25.784906 62034 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=embed-certs-059363 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.72.170
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1123 08:57:25.784959 62034 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1123 08:57:25.840443 62034 cni.go:84] Creating CNI manager for ""
I1123 08:57:25.840484 62034 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:57:25.840500 62034 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1123 08:57:25.840520 62034 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.72.170 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:embed-certs-059363 NodeName:embed-certs-059363 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.72.170"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.72.170 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1123 08:57:25.840651 62034 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.72.170
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "embed-certs-059363"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.72.170"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.72.170"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1123 08:57:25.840731 62034 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1123 08:57:25.855481 62034 binaries.go:51] Found k8s binaries, skipping transfer
I1123 08:57:25.855562 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1123 08:57:25.869149 62034 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (319 bytes)
I1123 08:57:25.890030 62034 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1123 08:57:25.913602 62034 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2225 bytes)
I1123 08:57:25.939399 62034 ssh_runner.go:195] Run: grep 192.168.72.170 control-plane.minikube.internal$ /etc/hosts
I1123 08:57:25.944187 62034 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.72.170 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:57:25.959980 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:26.112182 62034 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:57:26.150488 62034 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363 for IP: 192.168.72.170
I1123 08:57:26.150514 62034 certs.go:195] generating shared ca certs ...
I1123 08:57:26.150535 62034 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:26.150704 62034 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
I1123 08:57:26.150759 62034 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
I1123 08:57:26.150773 62034 certs.go:257] generating profile certs ...
I1123 08:57:26.150910 62034 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/client.key
I1123 08:57:26.151011 62034 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.key.4b3bdd21
I1123 08:57:26.151069 62034 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.key
I1123 08:57:26.151216 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
W1123 08:57:26.151290 62034 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
I1123 08:57:26.151305 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
I1123 08:57:26.151344 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
I1123 08:57:26.151380 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
I1123 08:57:26.151415 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
I1123 08:57:26.151483 62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:57:26.152356 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1123 08:57:26.201568 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1123 08:57:26.246367 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1123 08:57:26.299610 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1123 08:57:26.334177 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1428 bytes)
I1123 08:57:26.372484 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1123 08:57:26.408684 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1123 08:57:26.449833 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1123 08:57:26.493006 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /usr/share/ca-certificates/221482.pem (1708 bytes)
I1123 08:57:26.527341 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1123 08:57:26.564892 62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem --> /usr/share/ca-certificates/22148.pem (1338 bytes)
I1123 08:57:26.601408 62034 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1123 08:57:26.626296 62034 ssh_runner.go:195] Run: openssl version
I1123 08:57:26.634385 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/221482.pem && ln -fs /usr/share/ca-certificates/221482.pem /etc/ssl/certs/221482.pem"
I1123 08:57:26.650265 62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/221482.pem
I1123 08:57:26.657578 62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:02 /usr/share/ca-certificates/221482.pem
I1123 08:57:26.657632 62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/221482.pem
I1123 08:57:26.666331 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/221482.pem /etc/ssl/certs/3ec20f2e.0"
I1123 08:57:26.682746 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1123 08:57:26.697978 62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:26.704544 62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 07:56 /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:26.704612 62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:26.714575 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1123 08:57:26.730139 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/22148.pem && ln -fs /usr/share/ca-certificates/22148.pem /etc/ssl/certs/22148.pem"
I1123 08:57:26.745401 62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22148.pem
I1123 08:57:26.751383 62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:02 /usr/share/ca-certificates/22148.pem
I1123 08:57:26.751450 62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22148.pem
I1123 08:57:26.760273 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/22148.pem /etc/ssl/certs/51391683.0"
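[Editor's note: each CA file above gets a symlink named after its OpenSSL subject hash (3ec20f2e.0, b5213941.0, 51391683.0) so the TLS stack can locate it in /etc/ssl/certs. A sketch reproducing that step locally; the certificate path is an example taken from the log.]

```go
// cahash_sketch.go - create the <subject-hash>.0 symlink for a CA certificate,
// mirroring the openssl/ln commands in the log above.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	cert := "/usr/share/ca-certificates/minikubeCA.pem" // example path from the log

	// `openssl x509 -hash -noout` prints the subject hash used to name the symlink.
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", cert).Output()
	if err != nil {
		panic(err)
	}
	hash := strings.TrimSpace(string(out))

	link := fmt.Sprintf("/etc/ssl/certs/%s.0", hash)
	_ = os.Remove(link) // replace any stale link
	if err := os.Symlink(cert, link); err != nil {
		panic(err)
	}
	fmt.Println("linked", cert, "as", link)
}
```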
I1123 08:57:26.775477 62034 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1123 08:57:26.782298 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1123 08:57:26.790966 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1123 08:57:26.800082 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1123 08:57:26.809033 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1123 08:57:26.818403 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1123 08:57:26.827424 62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
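[Editor's note: `openssl x509 -checkend 86400` asks whether a certificate expires within the next 24 hours, which is how the existing control-plane certs above are vetted before reuse. An equivalent check in Go with crypto/x509; the path is one of those from the log and is used purely for illustration.]

```go
// certexpiry_sketch.go - the Go equivalent of `openssl x509 -checkend 86400`.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/var/lib/minikube/certs/apiserver-kubelet-client.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// -checkend 86400: fail if the cert expires within the next 86400 seconds.
	if time.Until(cert.NotAfter) < 86400*time.Second {
		fmt.Println("certificate expires within 24h, would regenerate")
	} else {
		fmt.Println("certificate valid for at least another 24h")
	}
}
```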
I1123 08:57:26.836600 62034 kubeadm.go:401] StartCluster: {Name:embed-certs-059363 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:57:26.836750 62034 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1123 08:57:26.857858 62034 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1123 08:57:26.872778 62034 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1123 08:57:26.872804 62034 kubeadm.go:598] restartPrimaryControlPlane start ...
I1123 08:57:26.872861 62034 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1123 08:57:26.887408 62034 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1123 08:57:26.888007 62034 kubeconfig.go:47] verify endpoint returned: get endpoint: "embed-certs-059363" does not appear in /home/jenkins/minikube-integration/21966-18241/kubeconfig
I1123 08:57:26.888341 62034 kubeconfig.go:62] /home/jenkins/minikube-integration/21966-18241/kubeconfig needs updating (will repair): [kubeconfig missing "embed-certs-059363" cluster setting kubeconfig missing "embed-certs-059363" context setting]
I1123 08:57:26.888835 62034 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/kubeconfig: {Name:mk4ff9c09d937b27d93688a0eb9fbee2087daab0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:26.917419 62034 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1123 08:57:26.931495 62034 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.72.170
I1123 08:57:26.931533 62034 kubeadm.go:1161] stopping kube-system containers ...
I1123 08:57:26.931598 62034 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1123 08:57:26.956424 62034 docker.go:484] Stopping containers: [2777bc8dc9d8 1e7fd2e1de3d 766f92e6b85c a4e7b815df08 b78206bd7ac1 246623b92954 0f2d7243cca6 5dc3731f3932 12f2dd5a9262 45882ff88b2f 8437f8a92375 866aa8687d31 230241a2edf7 1c8b359647bb 038fcdc4f7f6 049872fe8a58]
I1123 08:57:26.956515 62034 ssh_runner.go:195] Run: docker stop 2777bc8dc9d8 1e7fd2e1de3d 766f92e6b85c a4e7b815df08 b78206bd7ac1 246623b92954 0f2d7243cca6 5dc3731f3932 12f2dd5a9262 45882ff88b2f 8437f8a92375 866aa8687d31 230241a2edf7 1c8b359647bb 038fcdc4f7f6 049872fe8a58
I1123 08:57:26.982476 62034 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1123 08:57:27.015459 62034 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1123 08:57:27.030576 62034 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1123 08:57:27.030600 62034 kubeadm.go:158] found existing configuration files:
I1123 08:57:27.030658 62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1123 08:57:27.043658 62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1123 08:57:27.043723 62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1123 08:57:27.058167 62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1123 08:57:27.074375 62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1123 08:57:27.074449 62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1123 08:57:27.091119 62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1123 08:57:27.106772 62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1123 08:57:27.106876 62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1123 08:57:27.124425 62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1123 08:57:27.140001 62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1123 08:57:27.140061 62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
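
The grep/rm pairs above implement a simple stale-config check: each kubeconfig under /etc/kubernetes is kept only if it already references https://control-plane.minikube.internal:8443, and removed otherwise so kubeadm can regenerate it (in this run all four files are simply missing). A rough standalone sketch of that check, not minikube's actual kubeadm.go code:

package main

import (
	"bytes"
	"fmt"
	"os"
)

func main() {
	const endpoint = "https://control-plane.minikube.internal:8443"
	files := []string{
		"/etc/kubernetes/admin.conf",
		"/etc/kubernetes/kubelet.conf",
		"/etc/kubernetes/controller-manager.conf",
		"/etc/kubernetes/scheduler.conf",
	}
	for _, f := range files {
		data, err := os.ReadFile(f)
		if err == nil && bytes.Contains(data, []byte(endpoint)) {
			continue // config already targets the expected endpoint, keep it
		}
		fmt.Printf("%q may not be in %s - will remove\n", endpoint, f)
		os.Remove(f) // ignore the error; the file may simply not exist
	}
}
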
I1123 08:57:27.154930 62034 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1123 08:57:27.169444 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:27.328883 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:24.261134 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:24.261787 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:24.261806 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:24.262181 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:24.262219 62386 retry.go:31] will retry after 1.137472458s: waiting for domain to come up
I1123 08:57:25.401597 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:25.402373 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:25.402395 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:25.402716 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:25.402745 62386 retry.go:31] will retry after 1.246843188s: waiting for domain to come up
I1123 08:57:26.651383 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:26.652402 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:26.652423 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:26.652983 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:26.653027 62386 retry.go:31] will retry after 1.576847177s: waiting for domain to come up
I1123 08:57:28.231063 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:28.231892 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:28.231907 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:28.232342 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:28.232376 62386 retry.go:31] will retry after 2.191968701s: waiting for domain to come up
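
The 62386 process is still waiting for the newest-cni-078196 VM to pick up an IP address, retrying the lease/ARP lookup with a growing, jittered delay. A small sketch of that retry-with-backoff pattern, where lookupIP is a hypothetical stand-in for the libvirt lease and ARP queries:

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// lookupIP is a placeholder for the DHCP-lease / ARP lookups shown in the log.
func lookupIP() (string, error) {
	return "", errors.New("no network interface addresses found")
}

func waitForIP(timeout time.Duration) (string, error) {
	deadline := time.Now().Add(timeout)
	backoff := time.Second
	for time.Now().Before(deadline) {
		if ip, err := lookupIP(); err == nil {
			return ip, nil
		}
		// Sleep the base backoff plus some jitter, then double the base.
		wait := backoff + time.Duration(rand.Int63n(int64(backoff/2)))
		fmt.Printf("will retry after %s: waiting for domain to come up\n", wait)
		time.Sleep(wait)
		backoff *= 2
	}
	return "", errors.New("timed out waiting for domain IP")
}

func main() {
	if ip, err := waitForIP(30 * time.Second); err != nil {
		fmt.Println(err)
	} else {
		fmt.Println("found domain IP:", ip)
	}
}
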
I1123 08:57:29.072122 62034 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.743194687s)
I1123 08:57:29.072199 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:29.363322 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:29.437121 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:29.519180 62034 api_server.go:52] waiting for apiserver process to appear ...
I1123 08:57:29.519372 62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:57:30.019409 62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:57:30.519973 62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:57:31.019428 62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:57:31.127420 62034 api_server.go:72] duration metric: took 1.608256805s to wait for apiserver process to appear ...
I1123 08:57:31.127455 62034 api_server.go:88] waiting for apiserver healthz status ...
I1123 08:57:31.127480 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:31.128203 62034 api_server.go:269] stopped: https://192.168.72.170:8443/healthz: Get "https://192.168.72.170:8443/healthz": dial tcp 192.168.72.170:8443: connect: connection refused
I1123 08:57:31.627812 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:30.426848 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:30.427811 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:30.427838 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:30.428254 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:30.428293 62386 retry.go:31] will retry after 2.66246372s: waiting for domain to come up
I1123 08:57:33.093605 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:33.094467 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:33.094487 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:33.095017 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:33.095058 62386 retry.go:31] will retry after 2.368738453s: waiting for domain to come up
I1123 08:57:34.364730 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W1123 08:57:34.364762 62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I1123 08:57:34.364778 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:34.401309 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W1123 08:57:34.401349 62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I1123 08:57:34.627677 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:34.639017 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
[+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1123 08:57:34.639052 62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1123 08:57:35.127669 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:35.133471 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1123 08:57:35.133500 62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1123 08:57:35.628190 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:35.637607 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1123 08:57:35.637636 62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1123 08:57:36.128401 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:36.134007 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 200:
ok
I1123 08:57:36.142338 62034 api_server.go:141] control plane version: v1.34.1
I1123 08:57:36.142374 62034 api_server.go:131] duration metric: took 5.014912025s to wait for apiserver health ...
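
The healthz sequence above is a plain polling loop: the apiserver first refuses connections, then returns 403 for the anonymous probe, then 500 while etcd and the bootstrap post-start hooks settle, and finally 200 "ok". A self-contained sketch of such a loop, with certificate verification disabled for the anonymous probe and the URL taken from this log:

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	url := "https://192.168.72.170:8443/healthz"
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}

	deadline := time.Now().Add(2 * time.Minute)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err != nil {
			// e.g. "connection refused" while the apiserver container starts
			fmt.Println("stopped:", err)
		} else {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			fmt.Printf("healthz returned %d: %s\n", resp.StatusCode, body)
			if resp.StatusCode == http.StatusOK {
				return // healthz answered "ok"
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("timed out waiting for apiserver healthz")
}
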
I1123 08:57:36.142383 62034 cni.go:84] Creating CNI manager for ""
I1123 08:57:36.142394 62034 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:57:36.144644 62034 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1123 08:57:36.146156 62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1123 08:57:36.172405 62034 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I1123 08:57:36.206117 62034 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 08:57:36.212151 62034 system_pods.go:59] 8 kube-system pods found
I1123 08:57:36.212192 62034 system_pods.go:61] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1123 08:57:36.212201 62034 system_pods.go:61] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1123 08:57:36.212209 62034 system_pods.go:61] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1123 08:57:36.212215 62034 system_pods.go:61] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1123 08:57:36.212219 62034 system_pods.go:61] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
I1123 08:57:36.212227 62034 system_pods.go:61] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1123 08:57:36.212254 62034 system_pods.go:61] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1123 08:57:36.212263 62034 system_pods.go:61] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
I1123 08:57:36.212272 62034 system_pods.go:74] duration metric: took 6.125497ms to wait for pod list to return data ...
I1123 08:57:36.212281 62034 node_conditions.go:102] verifying NodePressure condition ...
I1123 08:57:36.216399 62034 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1123 08:57:36.216437 62034 node_conditions.go:123] node cpu capacity is 2
I1123 08:57:36.216455 62034 node_conditions.go:105] duration metric: took 4.163261ms to run NodePressure ...
I1123 08:57:36.216523 62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I1123 08:57:36.499954 62034 kubeadm.go:729] waiting for restarted kubelet to initialise ...
I1123 08:57:36.504225 62034 kubeadm.go:744] kubelet initialised
I1123 08:57:36.504271 62034 kubeadm.go:745] duration metric: took 4.279186ms waiting for restarted kubelet to initialise ...
I1123 08:57:36.504293 62034 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1123 08:57:36.525819 62034 ops.go:34] apiserver oom_adj: -16
I1123 08:57:36.525847 62034 kubeadm.go:602] duration metric: took 9.653035112s to restartPrimaryControlPlane
I1123 08:57:36.525859 62034 kubeadm.go:403] duration metric: took 9.689268169s to StartCluster
I1123 08:57:36.525879 62034 settings.go:142] acquiring lock: {Name:mk0efabf238cb985c892ac3a9b32ac206b9f2336 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:36.525969 62034 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21966-18241/kubeconfig
I1123 08:57:36.527038 62034 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/kubeconfig: {Name:mk4ff9c09d937b27d93688a0eb9fbee2087daab0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:36.527368 62034 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1123 08:57:36.527458 62034 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1123 08:57:36.527579 62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:57:36.527600 62034 addons.go:70] Setting metrics-server=true in profile "embed-certs-059363"
I1123 08:57:36.527599 62034 addons.go:70] Setting default-storageclass=true in profile "embed-certs-059363"
I1123 08:57:36.527579 62034 addons.go:70] Setting storage-provisioner=true in profile "embed-certs-059363"
I1123 08:57:36.527644 62034 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "embed-certs-059363"
I1123 08:57:36.527635 62034 addons.go:70] Setting dashboard=true in profile "embed-certs-059363"
I1123 08:57:36.527665 62034 addons.go:239] Setting addon dashboard=true in "embed-certs-059363"
W1123 08:57:36.527679 62034 addons.go:248] addon dashboard should already be in state true
I1123 08:57:36.527666 62034 cache.go:107] acquiring lock: {Name:mk5578ff0020d8c222414769e0c7ca17014d52f1 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1123 08:57:36.527671 62034 addons.go:239] Setting addon storage-provisioner=true in "embed-certs-059363"
W1123 08:57:36.527702 62034 addons.go:248] addon storage-provisioner should already be in state true
I1123 08:57:36.527733 62034 cache.go:115] /home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 exists
I1123 08:57:36.527637 62034 addons.go:239] Setting addon metrics-server=true in "embed-certs-059363"
I1123 08:57:36.527748 62034 cache.go:96] cache image "gcr.io/k8s-minikube/gvisor-addon:2" -> "/home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2" took 96.823µs
I1123 08:57:36.527758 62034 cache.go:80] save to tar file gcr.io/k8s-minikube/gvisor-addon:2 -> /home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 succeeded
I1123 08:57:36.527763 62034 host.go:66] Checking if "embed-certs-059363" exists ...
I1123 08:57:36.527766 62034 cache.go:87] Successfully saved all images to host disk.
W1123 08:57:36.527758 62034 addons.go:248] addon metrics-server should already be in state true
I1123 08:57:36.527796 62034 host.go:66] Checking if "embed-certs-059363" exists ...
I1123 08:57:36.527738 62034 host.go:66] Checking if "embed-certs-059363" exists ...
I1123 08:57:36.527934 62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:57:36.529271 62034 out.go:179] * Verifying Kubernetes components...
I1123 08:57:36.530935 62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:57:36.531022 62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:36.532294 62034 addons.go:239] Setting addon default-storageclass=true in "embed-certs-059363"
W1123 08:57:36.532326 62034 addons.go:248] addon default-storageclass should already be in state true
I1123 08:57:36.532348 62034 host.go:66] Checking if "embed-certs-059363" exists ...
I1123 08:57:36.533191 62034 out.go:179] - Using image registry.k8s.io/echoserver:1.4
I1123 08:57:36.533215 62034 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1123 08:57:36.533195 62034 out.go:179] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I1123 08:57:36.534073 62034 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1123 08:57:36.534091 62034 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1123 08:57:36.534667 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.535129 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:36.535347 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.535858 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:36.536061 62034 addons.go:436] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1123 08:57:36.536084 62034 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1123 08:57:36.536132 62034 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:57:36.536145 62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1123 08:57:36.536880 62034 out.go:179] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I1123 08:57:36.537788 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.538214 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-ns.yaml
I1123 08:57:36.538249 62034 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I1123 08:57:36.538746 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:36.538816 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.539088 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:36.540090 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.540146 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.541026 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:36.541069 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.541120 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:36.541158 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.541257 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:36.541514 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:36.542423 62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.542896 62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
I1123 08:57:36.542931 62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
I1123 08:57:36.543116 62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
I1123 08:57:36.844170 62034 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:57:36.869742 62034 node_ready.go:35] waiting up to 6m0s for node "embed-certs-059363" to be "Ready" ...
I1123 08:57:36.960323 62034 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1123 08:57:36.960371 62034 cache_images.go:86] Images are preloaded, skipping loading
I1123 08:57:36.960379 62034 cache_images.go:264] succeeded pushing to: embed-certs-059363
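
The "Got preloaded images" block is the output of docker images --format {{.Repository}}:{{.Tag}}, which minikube compares against the images the target Kubernetes version needs before deciding to skip loading. A rough sketch of that comparison, with the required list copied from the stdout block above:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
	if err != nil {
		fmt.Println("docker images failed:", err)
		return
	}
	have := map[string]bool{}
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		have[line] = true
	}

	required := []string{
		"registry.k8s.io/kube-apiserver:v1.34.1",
		"registry.k8s.io/kube-controller-manager:v1.34.1",
		"registry.k8s.io/kube-scheduler:v1.34.1",
		"registry.k8s.io/kube-proxy:v1.34.1",
		"registry.k8s.io/etcd:3.6.4-0",
		"registry.k8s.io/coredns/coredns:v1.12.1",
		"registry.k8s.io/pause:3.10.1",
	}
	missing := 0
	for _, img := range required {
		if !have[img] {
			missing++
			fmt.Println("missing image, would need to load:", img)
		}
	}
	if missing == 0 {
		fmt.Println("Images are preloaded, skipping loading")
	}
}
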
I1123 08:57:37.000609 62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1123 08:57:37.008492 62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1123 08:57:37.017692 62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1123 08:57:37.017713 62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I1123 08:57:37.020529 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I1123 08:57:37.020561 62034 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I1123 08:57:37.074670 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I1123 08:57:37.074710 62034 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I1123 08:57:37.076076 62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1123 08:57:37.076096 62034 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1123 08:57:37.132446 62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1123 08:57:37.132466 62034 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1123 08:57:37.134322 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I1123 08:57:37.134339 62034 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I1123 08:57:37.188291 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-dp.yaml
I1123 08:57:37.188311 62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
I1123 08:57:37.200924 62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1123 08:57:37.265084 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-role.yaml
I1123 08:57:37.265109 62034 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I1123 08:57:37.341532 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I1123 08:57:37.341559 62034 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I1123 08:57:37.425079 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-sa.yaml
I1123 08:57:37.425110 62034 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I1123 08:57:37.510704 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-secret.yaml
I1123 08:57:37.510748 62034 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I1123 08:57:37.600957 62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-svc.yaml
I1123 08:57:37.600982 62034 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I1123 08:57:37.663098 62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1123 08:57:38.728547 62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.720019273s)
I1123 08:57:38.824306 62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (1.623332944s)
I1123 08:57:38.824374 62034 addons.go:495] Verifying addon metrics-server=true in "embed-certs-059363"
W1123 08:57:38.886375 62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
I1123 08:57:39.122207 62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (1.459038888s)
I1123 08:57:39.124248 62034 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p embed-certs-059363 addons enable metrics-server
I1123 08:57:39.126125 62034 out.go:179] * Enabled addons: default-storageclass, storage-provisioner, metrics-server, dashboard
I1123 08:57:35.465742 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:35.466525 62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
I1123 08:57:35.466540 62386 main.go:143] libmachine: trying to list again with source=arp
I1123 08:57:35.467003 62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
I1123 08:57:35.467033 62386 retry.go:31] will retry after 4.454598391s: waiting for domain to come up
I1123 08:57:42.467134 62480 start.go:364] duration metric: took 25.46601127s to acquireMachinesLock for "default-k8s-diff-port-925051"
I1123 08:57:42.467190 62480 start.go:96] Skipping create...Using existing machine configuration
I1123 08:57:42.467196 62480 fix.go:54] fixHost starting:
I1123 08:57:42.469900 62480 fix.go:112] recreateIfNeeded on default-k8s-diff-port-925051: state=Stopped err=<nil>
W1123 08:57:42.469946 62480 fix.go:138] unexpected machine state, will restart: <nil>
I1123 08:57:39.127521 62034 addons.go:530] duration metric: took 2.600069679s for enable addons: enabled=[default-storageclass storage-provisioner metrics-server dashboard]
W1123 08:57:41.375432 62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
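
In parallel with the libmachine output that follows, the embed-certs-059363 run keeps polling the node's Ready condition (node_ready.go) within its 6m0s budget. An equivalent check from outside minikube could use kubectl wait; the node name, timeout and kubeconfig path below are the ones from this log:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Block until the node reports Ready or the timeout from the log elapses.
	cmd := exec.Command("kubectl", "wait", "--for=condition=Ready",
		"node/embed-certs-059363", "--timeout=6m0s")
	cmd.Env = append(os.Environ(),
		"KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig")
	out, err := cmd.CombinedOutput()
	fmt.Print(string(out))
	if err != nil {
		fmt.Println("node did not become Ready:", err)
	}
}
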
I1123 08:57:39.922903 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:39.923713 62386 main.go:143] libmachine: domain newest-cni-078196 has current primary IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:39.923726 62386 main.go:143] libmachine: found domain IP: 192.168.39.87
I1123 08:57:39.923732 62386 main.go:143] libmachine: reserving static IP address...
I1123 08:57:39.924129 62386 main.go:143] libmachine: unable to find host DHCP lease matching {name: "newest-cni-078196", mac: "52:54:00:d7:c1:0d", ip: "192.168.39.87"} in network mk-newest-cni-078196
I1123 08:57:40.154544 62386 main.go:143] libmachine: reserved static IP address 192.168.39.87 for domain newest-cni-078196
I1123 08:57:40.154569 62386 main.go:143] libmachine: waiting for SSH...
I1123 08:57:40.154577 62386 main.go:143] libmachine: Getting to WaitForSSH function...
I1123 08:57:40.157877 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.158255 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:minikube Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.158277 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.158452 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:40.158677 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:40.158690 62386 main.go:143] libmachine: About to run SSH command:
exit 0
I1123 08:57:40.266068 62386 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:57:40.266484 62386 main.go:143] libmachine: domain creation complete
I1123 08:57:40.268135 62386 machine.go:94] provisionDockerMachine start ...
I1123 08:57:40.270701 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.271083 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.271106 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.271243 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:40.271436 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:40.271446 62386 main.go:143] libmachine: About to run SSH command:
hostname
I1123 08:57:40.377718 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1123 08:57:40.377749 62386 buildroot.go:166] provisioning hostname "newest-cni-078196"
I1123 08:57:40.381682 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.382224 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.382274 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.382549 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:40.382750 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:40.382763 62386 main.go:143] libmachine: About to run SSH command:
sudo hostname newest-cni-078196 && echo "newest-cni-078196" | sudo tee /etc/hostname
I1123 08:57:40.510920 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: newest-cni-078196
I1123 08:57:40.514470 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.514870 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.514901 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.515119 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:40.515349 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:40.515373 62386 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\snewest-cni-078196' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 newest-cni-078196/g' /etc/hosts;
else
echo '127.0.1.1 newest-cni-078196' | sudo tee -a /etc/hosts;
fi
fi
I1123 08:57:40.644008 62386 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:57:40.644045 62386 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
I1123 08:57:40.644119 62386 buildroot.go:174] setting up certificates
I1123 08:57:40.644132 62386 provision.go:84] configureAuth start
I1123 08:57:40.647940 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.648462 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.648495 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.651488 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.651967 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.652002 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.652153 62386 provision.go:143] copyHostCerts
I1123 08:57:40.652210 62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
I1123 08:57:40.652252 62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
I1123 08:57:40.652340 62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
I1123 08:57:40.652511 62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
I1123 08:57:40.652528 62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
I1123 08:57:40.652580 62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
I1123 08:57:40.652714 62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
I1123 08:57:40.652735 62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
I1123 08:57:40.652778 62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
I1123 08:57:40.652872 62386 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.newest-cni-078196 san=[127.0.0.1 192.168.39.87 localhost minikube newest-cni-078196]
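
provision.go is generating a server certificate whose SANs cover the loopback address, the VM IP and the machine's hostnames, signed by the profile's CA. A standalone sketch of building such a certificate with the standard library, self-signed here for brevity whereas minikube signs with ca-key.pem; the SAN values are taken from the log line above:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.newest-cni-078196"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// SANs from the log: san=[127.0.0.1 192.168.39.87 localhost minikube newest-cni-078196]
		DNSNames:    []string{"localhost", "minikube", "newest-cni-078196"},
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.39.87")},
	}

	// Self-signed: template doubles as parent. minikube would pass its CA here.
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}
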
I1123 08:57:40.723606 62386 provision.go:177] copyRemoteCerts
I1123 08:57:40.723663 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 08:57:40.726615 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.727086 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.727115 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.727301 62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
I1123 08:57:40.819420 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1123 08:57:40.852505 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1123 08:57:40.888555 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1123 08:57:40.923977 62386 provision.go:87] duration metric: took 279.828188ms to configureAuth
I1123 08:57:40.924014 62386 buildroot.go:189] setting minikube options for container-runtime
I1123 08:57:40.924275 62386 config.go:182] Loaded profile config "newest-cni-078196": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:57:40.927517 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.927915 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:40.927938 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:40.928098 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:40.928391 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:40.928404 62386 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1123 08:57:41.042673 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1123 08:57:41.042707 62386 buildroot.go:70] root file system type: tmpfs
I1123 08:57:41.042873 62386 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1123 08:57:41.046445 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:41.046989 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:41.047094 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:41.047391 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:41.047683 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:41.047769 62386 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1123 08:57:41.175224 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1123 08:57:41.178183 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:41.178676 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:41.178702 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:41.178902 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:41.179152 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:41.179171 62386 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1123 08:57:42.186295 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
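For reference, the unit file above is streamed to the guest with sudo tee and only swapped into place when diff reports a difference. A minimal Go sketch of that render-then-install pattern follows; it is illustrative only (the template is trimmed and the helper names are invented, not minikube's actual code):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// unitTmpl is a trimmed-down docker.service template; the real unit in the
// log above carries many more flags and sections.
const unitTmpl = `[Unit]
Description=Docker Application Container Engine
[Service]
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 --tlsverify --tlscacert {{.CACert}}
[Install]
WantedBy=multi-user.target
`

type unitParams struct{ CACert string }

// installCmd builds a shell command that writes the rendered unit to
// <path>.new and only replaces the live unit (and restarts docker) when the
// two files differ, roughly like the SSH command in the log.
func installCmd(path, rendered string) string {
	return fmt.Sprintf(
		"sudo mkdir -p /lib/systemd/system && printf %%s %q | sudo tee %s.new && "+
			"{ sudo diff -u %s %s.new || { sudo mv %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart docker; }; }",
		rendered, path, path, path, path, path)
}

func main() {
	var buf bytes.Buffer
	t := template.Must(template.New("unit").Parse(unitTmpl))
	if err := t.Execute(&buf, unitParams{CACert: "/etc/docker/ca.pem"}); err != nil {
		panic(err)
	}
	fmt.Println(installCmd("/lib/systemd/system/docker.service", buf.String()))
}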
I1123 08:57:42.186331 62386 machine.go:97] duration metric: took 1.918179804s to provisionDockerMachine
I1123 08:57:42.186347 62386 client.go:176] duration metric: took 22.997600307s to LocalClient.Create
I1123 08:57:42.186371 62386 start.go:167] duration metric: took 22.997685492s to libmachine.API.Create "newest-cni-078196"
I1123 08:57:42.186382 62386 start.go:293] postStartSetup for "newest-cni-078196" (driver="kvm2")
I1123 08:57:42.186396 62386 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 08:57:42.186475 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 08:57:42.189917 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.190351 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.190388 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.190560 62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
I1123 08:57:42.283393 62386 ssh_runner.go:195] Run: cat /etc/os-release
I1123 08:57:42.289999 62386 info.go:137] Remote host: Buildroot 2025.02
I1123 08:57:42.290030 62386 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
I1123 08:57:42.290117 62386 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
I1123 08:57:42.290218 62386 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
I1123 08:57:42.290354 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 08:57:42.306924 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:57:42.343081 62386 start.go:296] duration metric: took 156.683452ms for postStartSetup
I1123 08:57:42.347012 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.347579 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.347619 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.347939 62386 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/config.json ...
I1123 08:57:42.348140 62386 start.go:128] duration metric: took 23.161911818s to createHost
I1123 08:57:42.350835 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.351301 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.351336 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.351513 62386 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:42.351791 62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.39.87 22 <nil> <nil>}
I1123 08:57:42.351806 62386 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1123 08:57:42.466967 62386 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888262.440217357
I1123 08:57:42.466993 62386 fix.go:216] guest clock: 1763888262.440217357
I1123 08:57:42.467001 62386 fix.go:229] Guest: 2025-11-23 08:57:42.440217357 +0000 UTC Remote: 2025-11-23 08:57:42.348151583 +0000 UTC m=+33.279616417 (delta=92.065774ms)
I1123 08:57:42.467025 62386 fix.go:200] guest clock delta is within tolerance: 92.065774ms
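The guest-clock check above runs `date +%s.%N` over SSH and compares it to the host clock. A small self-contained Go sketch of that comparison (the one-second tolerance is an assumption for illustration, not minikube's actual threshold):

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
	"time"
)

// guestClockDelta parses the output of `date +%s.%N` run on the guest and
// returns its offset from a reference (host) timestamp.
func guestClockDelta(out string, host time.Time) (time.Duration, error) {
	secs, err := strconv.ParseFloat(strings.TrimSpace(out), 64)
	if err != nil {
		return 0, fmt.Errorf("parsing guest time %q: %w", out, err)
	}
	sec, frac := math.Modf(secs)
	guest := time.Unix(int64(sec), int64(frac*1e9))
	return guest.Sub(host), nil
}

func main() {
	// Values taken from the log lines above.
	delta, err := guestClockDelta("1763888262.440217357\n", time.Unix(1763888262, 348151583))
	if err != nil {
		panic(err)
	}
	const tolerance = time.Second // illustrative tolerance
	fmt.Printf("delta=%v withinTolerance=%v\n", delta, delta.Abs() < tolerance)
}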
I1123 08:57:42.467033 62386 start.go:83] releasing machines lock for "newest-cni-078196", held for 23.280957089s
I1123 08:57:42.471032 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.471501 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.471531 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.472456 62386 ssh_runner.go:195] Run: cat /version.json
I1123 08:57:42.472536 62386 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 08:57:42.477011 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.477058 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.477612 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.477644 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.479664 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:42.479706 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:42.480287 62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
I1123 08:57:42.480869 62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
I1123 08:57:42.593772 62386 ssh_runner.go:195] Run: systemctl --version
I1123 08:57:42.603410 62386 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 08:57:42.614510 62386 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 08:57:42.614601 62386 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 08:57:42.645967 62386 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1123 08:57:42.646003 62386 start.go:496] detecting cgroup driver to use...
I1123 08:57:42.646138 62386 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:57:42.678706 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1123 08:57:42.694705 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 08:57:42.713341 62386 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1123 08:57:42.713419 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1123 08:57:42.729085 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:57:42.747983 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 08:57:42.768036 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:57:42.784061 62386 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 08:57:42.803711 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 08:57:42.822385 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 08:57:42.837748 62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1123 08:57:42.858942 62386 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 08:57:42.873841 62386 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1123 08:57:42.873924 62386 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1123 08:57:42.888503 62386 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 08:57:42.902894 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:43.087215 62386 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1123 08:57:43.137011 62386 start.go:496] detecting cgroup driver to use...
I1123 08:57:43.137115 62386 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1123 08:57:43.166541 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:57:43.198142 62386 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1123 08:57:43.220890 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:57:43.239791 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:57:43.260304 62386 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 08:57:43.296702 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:57:43.316993 62386 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:57:43.348493 62386 ssh_runner.go:195] Run: which cri-dockerd
I1123 08:57:43.353715 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1123 08:57:43.367872 62386 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1123 08:57:43.391806 62386 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1123 08:57:43.570922 62386 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1123 08:57:43.771497 62386 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1123 08:57:43.771641 62386 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1123 08:57:43.796840 62386 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1123 08:57:43.815699 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:43.997592 62386 ssh_runner.go:195] Run: sudo systemctl restart docker
I1123 08:57:44.541819 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 08:57:44.559735 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1123 08:57:44.577562 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:44.595133 62386 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1123 08:57:44.759253 62386 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1123 08:57:44.927897 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:45.126443 62386 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1123 08:57:45.161272 62386 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1123 08:57:45.179561 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:45.365439 62386 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1123 08:57:45.512591 62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:57:45.537318 62386 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1123 08:57:45.537393 62386 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1123 08:57:45.546577 62386 start.go:564] Will wait 60s for crictl version
I1123 08:57:45.546657 62386 ssh_runner.go:195] Run: which crictl
I1123 08:57:45.553243 62386 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1123 08:57:45.597074 62386 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1123 08:57:45.597163 62386 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:57:45.640023 62386 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:57:45.668409 62386 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1123 08:57:45.671742 62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:45.672152 62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
I1123 08:57:45.672174 62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
I1123 08:57:45.672386 62386 ssh_runner.go:195] Run: grep 192.168.39.1 host.minikube.internal$ /etc/hosts
I1123 08:57:45.677208 62386 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.39.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
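The bash one-liner above idempotently rewrites /etc/hosts: it drops any stale host.minikube.internal line and appends a fresh entry. A rough Go equivalent of that string manipulation, operating on the file contents in memory (a sketch, not minikube's code; it ignores edge cases like comments):

package main

import (
	"fmt"
	"strings"
)

// ensureHostsEntry removes any existing line for the given hostname and
// appends a fresh "<ip>\t<host>" entry, mirroring the grep/echo pipeline.
func ensureHostsEntry(hosts, ip, host string) string {
	var kept []string
	for _, line := range strings.Split(hosts, "\n") {
		if strings.HasSuffix(line, "\t"+host) || strings.HasSuffix(line, " "+host) {
			continue // drop the stale entry
		}
		if line != "" {
			kept = append(kept, line)
		}
	}
	kept = append(kept, fmt.Sprintf("%s\t%s", ip, host))
	return strings.Join(kept, "\n") + "\n"
}

func main() {
	existing := "127.0.0.1\tlocalhost\n192.168.39.1\thost.minikube.internal\n"
	fmt.Print(ensureHostsEntry(existing, "192.168.39.1", "host.minikube.internal"))
}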
I1123 08:57:45.697750 62386 out.go:179] - kubeadm.pod-network-cidr=10.42.0.0/16
I1123 08:57:42.471379 62480 out.go:252] * Restarting existing kvm2 VM for "default-k8s-diff-port-925051" ...
I1123 08:57:42.471439 62480 main.go:143] libmachine: starting domain...
I1123 08:57:42.471451 62480 main.go:143] libmachine: ensuring networks are active...
I1123 08:57:42.472371 62480 main.go:143] libmachine: Ensuring network default is active
I1123 08:57:42.473208 62480 main.go:143] libmachine: Ensuring network mk-default-k8s-diff-port-925051 is active
I1123 08:57:42.474158 62480 main.go:143] libmachine: getting domain XML...
I1123 08:57:42.476521 62480 main.go:143] libmachine: starting domain XML:
<domain type='kvm'>
<name>default-k8s-diff-port-925051</name>
<uuid>faa8704c-25e4-4eae-b827-cb508c4f9f54</uuid>
<memory unit='KiB'>3145728</memory>
<currentMemory unit='KiB'>3145728</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='2'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' io='threads'/>
<source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/default-k8s-diff-port-925051.rawdisk'/>
<target dev='hda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</controller>
<interface type='network'>
<mac address='52:54:00:19:c7:db'/>
<source network='mk-default-k8s-diff-port-925051'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:fd:c0:c5'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<audio id='1' type='none'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</rng>
</devices>
</domain>
I1123 08:57:44.035948 62480 main.go:143] libmachine: waiting for domain to start...
I1123 08:57:44.037946 62480 main.go:143] libmachine: domain is now running
I1123 08:57:44.037965 62480 main.go:143] libmachine: waiting for IP...
I1123 08:57:44.039014 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:57:44.039860 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has current primary IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:57:44.039874 62480 main.go:143] libmachine: found domain IP: 192.168.83.137
I1123 08:57:44.039880 62480 main.go:143] libmachine: reserving static IP address...
I1123 08:57:44.040364 62480 main.go:143] libmachine: found host DHCP lease matching {name: "default-k8s-diff-port-925051", mac: "52:54:00:19:c7:db", ip: "192.168.83.137"} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:55:37 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:57:44.040404 62480 main.go:143] libmachine: skip adding static IP to network mk-default-k8s-diff-port-925051 - found existing host DHCP lease matching {name: "default-k8s-diff-port-925051", mac: "52:54:00:19:c7:db", ip: "192.168.83.137"}
I1123 08:57:44.040416 62480 main.go:143] libmachine: reserved static IP address 192.168.83.137 for domain default-k8s-diff-port-925051
I1123 08:57:44.040421 62480 main.go:143] libmachine: waiting for SSH...
I1123 08:57:44.040425 62480 main.go:143] libmachine: Getting to WaitForSSH function...
I1123 08:57:44.043072 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:57:44.043526 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:55:37 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:57:44.043551 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:57:44.043747 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:57:44.044097 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:57:44.044119 62480 main.go:143] libmachine: About to run SSH command:
exit 0
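"Waiting for SSH" here amounts to repeatedly trying the guest's port 22 until the dial succeeds (the "no route to host" error later in the log is one of the transient failures such a loop rides out). A minimal Go sketch of that retry loop, with an assumed 30-second deadline:

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForSSH dials the guest's SSH port until it answers or the deadline
// passes.
func waitForSSH(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		fmt.Printf("ssh not ready: %v (retrying)\n", err)
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("timed out waiting for %s", addr)
}

func main() {
	if err := waitForSSH("192.168.83.137:22", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}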
W1123 08:57:43.874417 62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
I1123 08:57:44.875063 62034 node_ready.go:49] node "embed-certs-059363" is "Ready"
I1123 08:57:44.875101 62034 node_ready.go:38] duration metric: took 8.005319911s for node "embed-certs-059363" to be "Ready" ...
I1123 08:57:44.875126 62034 api_server.go:52] waiting for apiserver process to appear ...
I1123 08:57:44.875194 62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1123 08:57:44.908964 62034 api_server.go:72] duration metric: took 8.381553502s to wait for apiserver process to appear ...
I1123 08:57:44.908993 62034 api_server.go:88] waiting for apiserver healthz status ...
I1123 08:57:44.909013 62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
I1123 08:57:44.924580 62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 200:
ok
I1123 08:57:44.927212 62034 api_server.go:141] control plane version: v1.34.1
I1123 08:57:44.927254 62034 api_server.go:131] duration metric: took 18.252447ms to wait for apiserver health ...
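The healthz probe above is an HTTPS GET against the apiserver that expects a 200 with body "ok". A self-contained Go sketch of that check; it skips TLS verification purely to stay short, whereas a real client would trust the cluster CA:

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

// checkHealthz returns true when the apiserver /healthz endpoint answers
// 200 "ok", as seen in the log above.
func checkHealthz(url string) (bool, error) {
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(url)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	return resp.StatusCode == http.StatusOK && string(body) == "ok", nil
}

func main() {
	healthy, err := checkHealthz("https://192.168.72.170:8443/healthz")
	fmt.Println(healthy, err)
}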
I1123 08:57:44.927266 62034 system_pods.go:43] waiting for kube-system pods to appear ...
I1123 08:57:44.936682 62034 system_pods.go:59] 8 kube-system pods found
I1123 08:57:44.936719 62034 system_pods.go:61] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running
I1123 08:57:44.936727 62034 system_pods.go:61] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running
I1123 08:57:44.936746 62034 system_pods.go:61] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1123 08:57:44.936754 62034 system_pods.go:61] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running
I1123 08:57:44.936762 62034 system_pods.go:61] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
I1123 08:57:44.936772 62034 system_pods.go:61] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1123 08:57:44.936780 62034 system_pods.go:61] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1123 08:57:44.936786 62034 system_pods.go:61] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
I1123 08:57:44.936794 62034 system_pods.go:74] duration metric: took 9.520766ms to wait for pod list to return data ...
I1123 08:57:44.936804 62034 default_sa.go:34] waiting for default service account to be created ...
I1123 08:57:44.948188 62034 default_sa.go:45] found service account: "default"
I1123 08:57:44.948225 62034 default_sa.go:55] duration metric: took 11.401143ms for default service account to be created ...
I1123 08:57:44.948255 62034 system_pods.go:116] waiting for k8s-apps to be running ...
I1123 08:57:44.951719 62034 system_pods.go:86] 8 kube-system pods found
I1123 08:57:44.951754 62034 system_pods.go:89] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running
I1123 08:57:44.951774 62034 system_pods.go:89] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running
I1123 08:57:44.951787 62034 system_pods.go:89] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1123 08:57:44.951803 62034 system_pods.go:89] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running
I1123 08:57:44.951812 62034 system_pods.go:89] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
I1123 08:57:44.951821 62034 system_pods.go:89] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1123 08:57:44.951837 62034 system_pods.go:89] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1123 08:57:44.951850 62034 system_pods.go:89] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
I1123 08:57:44.951862 62034 system_pods.go:126] duration metric: took 3.598572ms to wait for k8s-apps to be running ...
I1123 08:57:44.951872 62034 system_svc.go:44] waiting for kubelet service to be running ....
I1123 08:57:44.951940 62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1123 08:57:44.981007 62034 system_svc.go:56] duration metric: took 29.122206ms WaitForService to wait for kubelet
I1123 08:57:44.981059 62034 kubeadm.go:587] duration metric: took 8.453653674s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1123 08:57:44.981082 62034 node_conditions.go:102] verifying NodePressure condition ...
I1123 08:57:44.985604 62034 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1123 08:57:44.985627 62034 node_conditions.go:123] node cpu capacity is 2
I1123 08:57:44.985639 62034 node_conditions.go:105] duration metric: took 4.549928ms to run NodePressure ...
I1123 08:57:44.985653 62034 start.go:242] waiting for startup goroutines ...
I1123 08:57:44.985663 62034 start.go:247] waiting for cluster config update ...
I1123 08:57:44.985678 62034 start.go:256] writing updated cluster config ...
I1123 08:57:44.986007 62034 ssh_runner.go:195] Run: rm -f paused
I1123 08:57:44.992429 62034 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:57:44.997825 62034 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-665gz" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:45.005294 62034 pod_ready.go:94] pod "coredns-66bc5c9577-665gz" is "Ready"
I1123 08:57:45.005321 62034 pod_ready.go:86] duration metric: took 7.470836ms for pod "coredns-66bc5c9577-665gz" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:45.008602 62034 pod_ready.go:83] waiting for pod "etcd-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:45.017355 62034 pod_ready.go:94] pod "etcd-embed-certs-059363" is "Ready"
I1123 08:57:45.017385 62034 pod_ready.go:86] duration metric: took 8.758566ms for pod "etcd-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:45.020737 62034 pod_ready.go:83] waiting for pod "kube-apiserver-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
W1123 08:57:47.036716 62034 pod_ready.go:104] pod "kube-apiserver-embed-certs-059363" is not "Ready", error: <nil>
I1123 08:57:45.699160 62386 kubeadm.go:884] updating cluster {Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.87 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1123 08:57:45.699335 62386 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1123 08:57:45.699438 62386 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:57:45.722240 62386 docker.go:691] Got preloaded images:
I1123 08:57:45.722266 62386 docker.go:697] registry.k8s.io/kube-apiserver:v1.34.1 wasn't preloaded
I1123 08:57:45.722318 62386 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
I1123 08:57:45.737539 62386 ssh_runner.go:195] Run: which lz4
I1123 08:57:45.742521 62386 ssh_runner.go:195] Run: stat -c "%s %y" /preloaded.tar.lz4
I1123 08:57:45.748122 62386 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%s %y" /preloaded.tar.lz4: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/preloaded.tar.lz4': No such file or directory
I1123 08:57:45.748156 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 --> /preloaded.tar.lz4 (353378914 bytes)
I1123 08:57:47.397908 62386 docker.go:655] duration metric: took 1.655425847s to copy over tarball
I1123 08:57:47.398050 62386 ssh_runner.go:195] Run: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4
I1123 08:57:49.041182 62386 ssh_runner.go:235] Completed: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4: (1.643095229s)
I1123 08:57:49.041212 62386 ssh_runner.go:146] rm: /preloaded.tar.lz4
I1123 08:57:49.084378 62386 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
I1123 08:57:49.103760 62386 ssh_runner.go:362] scp memory --> /var/lib/docker/image/overlay2/repositories.json (2632 bytes)
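The preload step above copies an lz4-compressed image tarball to the guest and unpacks it into /var with tar. A short Go sketch of that extraction step (run locally here for brevity; in practice it runs over SSH and needs sudo and the lz4 binary on the guest):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// extractPreload reproduces the tar invocation from the log: unpack the
// lz4-compressed image preload into /var, preserving extended attributes.
func extractPreload(tarball string) error {
	start := time.Now()
	cmd := exec.Command("sudo", "tar",
		"--xattrs", "--xattrs-include", "security.capability",
		"-I", "lz4", "-C", "/var", "-xf", tarball)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("tar failed: %v: %s", err, out)
	}
	fmt.Printf("extracted %s in %s\n", tarball, time.Since(start))
	return nil
}

func main() {
	if err := extractPreload("/preloaded.tar.lz4"); err != nil {
		fmt.Println(err)
	}
}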
W1123 08:57:49.601859 62034 pod_ready.go:104] pod "kube-apiserver-embed-certs-059363" is not "Ready", error: <nil>
I1123 08:57:50.104106 62034 pod_ready.go:94] pod "kube-apiserver-embed-certs-059363" is "Ready"
I1123 08:57:50.104158 62034 pod_ready.go:86] duration metric: took 5.08337291s for pod "kube-apiserver-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.107546 62034 pod_ready.go:83] waiting for pod "kube-controller-manager-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.115455 62034 pod_ready.go:94] pod "kube-controller-manager-embed-certs-059363" is "Ready"
I1123 08:57:50.115500 62034 pod_ready.go:86] duration metric: took 7.928459ms for pod "kube-controller-manager-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.119972 62034 pod_ready.go:83] waiting for pod "kube-proxy-sjvcr" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.127595 62034 pod_ready.go:94] pod "kube-proxy-sjvcr" is "Ready"
I1123 08:57:50.127628 62034 pod_ready.go:86] duration metric: took 7.626091ms for pod "kube-proxy-sjvcr" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.773984 62034 pod_ready.go:83] waiting for pod "kube-scheduler-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.786424 62034 pod_ready.go:94] pod "kube-scheduler-embed-certs-059363" is "Ready"
I1123 08:57:50.786450 62034 pod_ready.go:86] duration metric: took 12.434457ms for pod "kube-scheduler-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
I1123 08:57:50.786464 62034 pod_ready.go:40] duration metric: took 5.79400818s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1123 08:57:50.838926 62034 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
I1123 08:57:50.918780 62034 out.go:179] * Done! kubectl is now configured to use "embed-certs-059363" cluster and "default" namespace by default
I1123 08:57:47.146461 62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: no route to host
I1123 08:57:49.133800 62386 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1123 08:57:49.157740 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:49.330628 62386 ssh_runner.go:195] Run: sudo systemctl restart docker
I1123 08:57:52.066864 62386 ssh_runner.go:235] Completed: sudo systemctl restart docker: (2.736192658s)
I1123 08:57:52.066973 62386 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:57:52.092926 62386 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1123 08:57:52.092950 62386 cache_images.go:86] Images are preloaded, skipping loading
I1123 08:57:52.092962 62386 kubeadm.go:935] updating node { 192.168.39.87 8443 v1.34.1 docker true true} ...
I1123 08:57:52.093116 62386 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=newest-cni-078196 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.39.87
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1123 08:57:52.093201 62386 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1123 08:57:52.154769 62386 cni.go:84] Creating CNI manager for ""
I1123 08:57:52.154816 62386 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:57:52.154857 62386 kubeadm.go:85] Using pod CIDR: 10.42.0.0/16
I1123 08:57:52.154889 62386 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.42.0.0/16 AdvertiseAddress:192.168.39.87 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:newest-cni-078196 NodeName:newest-cni-078196 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.39.87"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.39.87 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1123 08:57:52.155043 62386 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.39.87
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "newest-cni-078196"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.39.87"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.39.87"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.42.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.42.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1123 08:57:52.155124 62386 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1123 08:57:52.170649 62386 binaries.go:51] Found k8s binaries, skipping transfer
I1123 08:57:52.170739 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1123 08:57:52.186437 62386 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I1123 08:57:52.209956 62386 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1123 08:57:52.238732 62386 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2219 bytes)
I1123 08:57:52.263556 62386 ssh_runner.go:195] Run: grep 192.168.39.87 control-plane.minikube.internal$ /etc/hosts
I1123 08:57:52.269016 62386 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.39.87 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:57:52.291438 62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:57:52.468471 62386 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:57:52.523082 62386 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196 for IP: 192.168.39.87
I1123 08:57:52.523106 62386 certs.go:195] generating shared ca certs ...
I1123 08:57:52.523125 62386 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.523320 62386 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
I1123 08:57:52.523383 62386 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
I1123 08:57:52.523392 62386 certs.go:257] generating profile certs ...
I1123 08:57:52.523458 62386 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key
I1123 08:57:52.523471 62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt with IP's: []
I1123 08:57:52.657113 62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt ...
I1123 08:57:52.657156 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt: {Name:mkd4a2297a388c5353f24d63692a9eca2de3895a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.657425 62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key ...
I1123 08:57:52.657447 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key: {Name:mk97d3b4437d9c086044675cf55d01816d40a112 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.657646 62386 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4
I1123 08:57:52.657673 62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.39.87]
I1123 08:57:52.753683 62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 ...
I1123 08:57:52.753714 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4: {Name:mkbf555d613a4fba5c26a5d85e984e69fa19d66f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.753910 62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4 ...
I1123 08:57:52.753929 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4: {Name:mk86a1d3d78eb2290d7da0f96ec23ec9d83a7382 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.754031 62386 certs.go:382] copying /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 -> /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt
I1123 08:57:52.754133 62386 certs.go:386] copying /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4 -> /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key
I1123 08:57:52.754190 62386 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key
I1123 08:57:52.754206 62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt with IP's: []
I1123 08:57:52.860620 62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt ...
I1123 08:57:52.860647 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt: {Name:mk8319204c666212061b0efe79d3f0da238ee7e7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.860851 62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key ...
I1123 08:57:52.860877 62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key: {Name:mk66bf3abe86bc12c3af12e371d390dfcbb94d6a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:57:52.861117 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
W1123 08:57:52.861164 62386 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
I1123 08:57:52.861180 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
I1123 08:57:52.861225 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
I1123 08:57:52.861277 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
I1123 08:57:52.861316 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
I1123 08:57:52.861376 62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:57:52.861976 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1123 08:57:52.899377 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1123 08:57:52.931761 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1123 08:57:52.966281 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1123 08:57:53.007390 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1123 08:57:53.044942 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1123 08:57:53.087195 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1123 08:57:53.132412 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1123 08:57:53.183547 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1123 08:57:53.239854 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem --> /usr/share/ca-certificates/22148.pem (1338 bytes)
I1123 08:57:53.286333 62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /usr/share/ca-certificates/221482.pem (1708 bytes)
I1123 08:57:53.334114 62386 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1123 08:57:53.368550 62386 ssh_runner.go:195] Run: openssl version
I1123 08:57:53.379200 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/221482.pem && ln -fs /usr/share/ca-certificates/221482.pem /etc/ssl/certs/221482.pem"
I1123 08:57:53.402310 62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/221482.pem
I1123 08:57:53.409135 62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:02 /usr/share/ca-certificates/221482.pem
I1123 08:57:53.409206 62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/221482.pem
I1123 08:57:53.420776 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/221482.pem /etc/ssl/certs/3ec20f2e.0"
I1123 08:57:53.439668 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1123 08:57:53.455152 62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:53.463920 62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 07:56 /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:53.463999 62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1123 08:57:53.476317 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1123 08:57:53.500779 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/22148.pem && ln -fs /usr/share/ca-certificates/22148.pem /etc/ssl/certs/22148.pem"
I1123 08:57:53.518199 62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22148.pem
I1123 08:57:53.524305 62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:02 /usr/share/ca-certificates/22148.pem
I1123 08:57:53.524381 62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22148.pem
I1123 08:57:53.535728 62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/22148.pem /etc/ssl/certs/51391683.0"
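The openssl/ln sequence above installs each CA certificate under /usr/share/ca-certificates and symlinks it into /etc/ssl/certs by its OpenSSL subject hash so TLS clients can find it. A hedged Go sketch of that hash-and-link step (it shells out to openssl and needs write access to the certs directory; paths are the ones from the log):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// linkCertByHash computes the subject hash of a PEM certificate via
// `openssl x509 -hash -noout` and symlinks it as <hash>.0 in certsDir.
func linkCertByHash(certPath, certsDir string) (string, error) {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", certPath).Output()
	if err != nil {
		return "", fmt.Errorf("openssl hash failed: %w", err)
	}
	link := filepath.Join(certsDir, strings.TrimSpace(string(out))+".0")
	if _, err := os.Lstat(link); err == nil {
		return link, nil // already linked
	}
	return link, os.Symlink(certPath, link)
}

func main() {
	link, err := linkCertByHash("/usr/share/ca-certificates/minikubeCA.pem", "/etc/ssl/certs")
	fmt.Println(link, err)
}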
I1123 08:57:53.552096 62386 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1123 08:57:53.560216 62386 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1123 08:57:53.560306 62386 kubeadm.go:401] StartCluster: {Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.87 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1123 08:57:53.560470 62386 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1123 08:57:53.580412 62386 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1123 08:57:53.596570 62386 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1123 08:57:53.611293 62386 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1123 08:57:53.630652 62386 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1123 08:57:53.630673 62386 kubeadm.go:158] found existing configuration files:
I1123 08:57:53.630721 62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1123 08:57:53.648350 62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1123 08:57:53.648419 62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1123 08:57:53.668086 62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1123 08:57:53.682346 62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1123 08:57:53.682427 62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1123 08:57:53.696036 62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1123 08:57:53.708650 62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1123 08:57:53.708729 62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1123 08:57:53.721869 62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1123 08:57:53.733930 62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1123 08:57:53.734006 62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1123 08:57:53.747563 62386 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem"
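The kubeadm init invocation above prepends the bundled binary directory to PATH and passes a long --ignore-preflight-errors list so the command tolerates leftover manifests and the VM's small resource footprint. A rough Go sketch of that invocation shape is below; the function name is invented and the ignore list is abbreviated to a few entries from the log.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// runKubeadmInit launches the versioned kubeadm binary with the generated
// config, ignoring a subset of preflight checks (abbreviated here).
func runKubeadmInit() error {
	binDir := "/var/lib/minikube/binaries/v1.34.1"
	cmd := exec.Command(binDir+"/kubeadm", "init",
		"--config", "/var/tmp/minikube/kubeadm.yaml",
		"--ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,Port-10250,Swap,NumCPU,Mem",
	)
	cmd.Env = append(os.Environ(), "PATH="+binDir+":"+os.Getenv("PATH"))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	if err := runKubeadmInit(); err != nil {
		fmt.Fprintln(os.Stderr, "kubeadm init failed:", err)
	}
}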
I1123 08:57:53.803699 62386 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
I1123 08:57:53.803788 62386 kubeadm.go:319] [preflight] Running pre-flight checks
I1123 08:57:53.933708 62386 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I1123 08:57:53.933907 62386 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1123 08:57:53.934039 62386 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1123 08:57:53.957595 62386 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1123 08:57:53.960282 62386 out.go:252] - Generating certificates and keys ...
I1123 08:57:53.960381 62386 kubeadm.go:319] [certs] Using existing ca certificate authority
I1123 08:57:53.960461 62386 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I1123 08:57:53.226464 62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: no route to host
I1123 08:57:54.308839 62386 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I1123 08:57:54.462473 62386 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I1123 08:57:54.656673 62386 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I1123 08:57:55.051656 62386 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I1123 08:57:55.893313 62386 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I1123 08:57:55.893649 62386 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost newest-cni-078196] and IPs [192.168.39.87 127.0.0.1 ::1]
I1123 08:57:56.010218 62386 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I1123 08:57:56.010458 62386 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost newest-cni-078196] and IPs [192.168.39.87 127.0.0.1 ::1]
I1123 08:57:56.117087 62386 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I1123 08:57:56.436611 62386 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I1123 08:57:56.745597 62386 kubeadm.go:319] [certs] Generating "sa" key and public key
I1123 08:57:56.745835 62386 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1123 08:57:56.988789 62386 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I1123 08:57:57.476516 62386 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1123 08:57:57.662890 62386 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1123 08:57:58.001771 62386 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1123 08:57:58.199479 62386 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1123 08:57:58.201506 62386 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1123 08:57:58.204309 62386 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1123 08:57:58.206280 62386 out.go:252] - Booting up control plane ...
I1123 08:57:58.206413 62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1123 08:57:58.206524 62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1123 08:57:58.206622 62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1123 08:57:58.225366 62386 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1123 08:57:58.225656 62386 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I1123 08:57:58.233945 62386 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I1123 08:57:58.234118 62386 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1123 08:57:58.234179 62386 kubeadm.go:319] [kubelet-start] Starting the kubelet
I1123 08:57:58.435406 62386 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1123 08:57:58.435734 62386 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1123 08:57:57.259625 62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: connection refused
I1123 08:58:00.375540 62480 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:58:00.379895 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.380474 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.380511 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.380795 62480 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/config.json ...
I1123 08:58:00.381087 62480 machine.go:94] provisionDockerMachine start ...
I1123 08:58:00.384347 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.384859 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.384898 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.385108 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:00.385436 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:00.385456 62480 main.go:143] libmachine: About to run SSH command:
hostname
I1123 08:58:00.505124 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1123 08:58:00.505170 62480 buildroot.go:166] provisioning hostname "default-k8s-diff-port-925051"
I1123 08:58:00.509221 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.509702 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.509735 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.509925 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:00.510144 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:00.510161 62480 main.go:143] libmachine: About to run SSH command:
sudo hostname default-k8s-diff-port-925051 && echo "default-k8s-diff-port-925051" | sudo tee /etc/hostname
I1123 08:58:00.644600 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: default-k8s-diff-port-925051
I1123 08:58:00.648066 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.648604 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.648630 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.648845 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:00.649045 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:00.649060 62480 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sdefault-k8s-diff-port-925051' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 default-k8s-diff-port-925051/g' /etc/hosts;
else
echo '127.0.1.1 default-k8s-diff-port-925051' | sudo tee -a /etc/hosts;
fi
fi
I1123 08:58:00.768996 62480 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1123 08:58:00.769030 62480 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
I1123 08:58:00.769067 62480 buildroot.go:174] setting up certificates
I1123 08:58:00.769088 62480 provision.go:84] configureAuth start
I1123 08:58:00.772355 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.772869 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.772909 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.775615 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.776035 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.776086 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.776228 62480 provision.go:143] copyHostCerts
I1123 08:58:00.776306 62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
I1123 08:58:00.776319 62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
I1123 08:58:00.776391 62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
I1123 08:58:00.776518 62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
I1123 08:58:00.776529 62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
I1123 08:58:00.776558 62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
I1123 08:58:00.776642 62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
I1123 08:58:00.776653 62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
I1123 08:58:00.776678 62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
I1123 08:58:00.776751 62480 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.default-k8s-diff-port-925051 san=[127.0.0.1 192.168.83.137 default-k8s-diff-port-925051 localhost minikube]
I1123 08:58:00.949651 62480 provision.go:177] copyRemoteCerts
I1123 08:58:00.949711 62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1123 08:58:00.952558 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.952960 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:00.952982 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:00.953136 62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
I1123 08:58:01.044089 62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1123 08:58:01.077898 62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1123 08:58:01.115919 62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1249 bytes)
I1123 08:58:01.157254 62480 provision.go:87] duration metric: took 388.131412ms to configureAuth
I1123 08:58:01.157285 62480 buildroot.go:189] setting minikube options for container-runtime
I1123 08:58:01.157510 62480 config.go:182] Loaded profile config "default-k8s-diff-port-925051": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:58:01.160663 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.161248 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:01.161295 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.161496 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:01.161777 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:01.161792 62480 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1123 08:58:01.278322 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1123 08:58:01.278347 62480 buildroot.go:70] root file system type: tmpfs
I1123 08:58:01.278524 62480 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1123 08:58:01.281592 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.282050 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:01.282098 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.282395 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:01.282601 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:01.282650 62480 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1123 08:58:01.426254 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1123 08:58:01.429123 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.429531 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:01.429561 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:01.429727 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:01.429945 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:01.429968 62480 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
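The command above keeps the docker.service update idempotent: the freshly rendered unit is written to docker.service.new, and only when `diff -u` reports a difference is it moved into place and the daemon reloaded, enabled, and restarted. A minimal Go sketch of the same write, diff, swap pattern, with an invented helper name and simplified error handling:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// updateUnitIfChanged writes the candidate unit next to the live one and only
// swaps it in (and restarts the service) when the two files differ.
func updateUnitIfChanged(path, contents, service string) error {
	newPath := path + ".new"
	if err := os.WriteFile(newPath, []byte(contents), 0o644); err != nil {
		return err
	}
	// diff exits 0 only when both files exist and are identical.
	if exec.Command("diff", "-u", path, newPath).Run() == nil {
		return os.Remove(newPath) // nothing changed, keep the live unit
	}
	if err := os.Rename(newPath, path); err != nil {
		return err
	}
	for _, args := range [][]string{
		{"systemctl", "-f", "daemon-reload"},
		{"systemctl", "-f", "enable", service},
		{"systemctl", "-f", "restart", service},
	} {
		if err := exec.Command(args[0], args[1:]...).Run(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := updateUnitIfChanged("/lib/systemd/system/docker.service", "[Unit]\n", "docker")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}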
I1123 08:57:59.438296 62386 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.003129845s
I1123 08:57:59.442059 62386 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I1123 08:57:59.442209 62386 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.39.87:8443/livez
I1123 08:57:59.442348 62386 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I1123 08:57:59.442479 62386 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I1123 08:58:01.938904 62386 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 2.497307336s
I1123 08:58:03.405770 62386 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 3.965160338s
I1123 08:58:05.442827 62386 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 6.002687393s
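The three [control-plane-check] results above come from polling each component's health endpoint until it answers: the API server's /livez on the node IP, and the controller manager's and scheduler's endpoints on localhost. A simplified Go polling sketch is below; kubeadm's real check differs (proper TLS verification and backoff), so certificate verification is skipped here only to keep the example short.

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// waitHealthy polls an HTTPS health endpoint until it returns 200 OK or the
// deadline passes, e.g. https://127.0.0.1:10259/livez for the scheduler.
func waitHealthy(url string, timeout time.Duration) error {
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("%s not healthy after %s", url, timeout)
}

func main() {
	fmt.Println(waitHealthy("https://127.0.0.1:10259/livez", 4*time.Minute))
}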
I1123 08:58:05.466318 62386 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1123 08:58:05.495033 62386 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1123 08:58:05.522725 62386 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
I1123 08:58:05.523012 62386 kubeadm.go:319] [mark-control-plane] Marking the node newest-cni-078196 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1123 08:58:05.543260 62386 kubeadm.go:319] [bootstrap-token] Using token: dgrodg.6ciokz1biodl2yci
I1123 08:58:02.622394 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1123 08:58:02.622428 62480 machine.go:97] duration metric: took 2.24132298s to provisionDockerMachine
I1123 08:58:02.622443 62480 start.go:293] postStartSetup for "default-k8s-diff-port-925051" (driver="kvm2")
I1123 08:58:02.622457 62480 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1123 08:58:02.622522 62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1123 08:58:02.625753 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.626334 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:02.626374 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.626567 62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
I1123 08:58:02.732392 62480 ssh_runner.go:195] Run: cat /etc/os-release
I1123 08:58:02.737975 62480 info.go:137] Remote host: Buildroot 2025.02
I1123 08:58:02.738010 62480 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
I1123 08:58:02.738111 62480 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
I1123 08:58:02.738225 62480 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
I1123 08:58:02.738341 62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1123 08:58:02.755815 62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:58:02.790325 62480 start.go:296] duration metric: took 167.864143ms for postStartSetup
I1123 08:58:02.790381 62480 fix.go:56] duration metric: took 20.323185295s for fixHost
I1123 08:58:02.793471 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.793912 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:02.793950 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.794223 62480 main.go:143] libmachine: Using SSH client type: native
I1123 08:58:02.794447 62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil> [] 0s} 192.168.83.137 22 <nil> <nil>}
I1123 08:58:02.794458 62480 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1123 08:58:02.907310 62480 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888282.872914256
I1123 08:58:02.907338 62480 fix.go:216] guest clock: 1763888282.872914256
I1123 08:58:02.907348 62480 fix.go:229] Guest: 2025-11-23 08:58:02.872914256 +0000 UTC Remote: 2025-11-23 08:58:02.790385341 +0000 UTC m=+45.999028572 (delta=82.528915ms)
I1123 08:58:02.907369 62480 fix.go:200] guest clock delta is within tolerance: 82.528915ms
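The fix.go lines above compare the guest clock (read with `date +%s.%N` over SSH) against the host clock and accept the drift when it stays inside a tolerance; here the delta is about 82.5ms. A small Go sketch of that comparison using the values from the log; the 2s tolerance is an assumption for illustration, not the value minikube uses.

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
	"time"
)

// clockDelta parses the guest's `date +%s.%N` output and returns how far it
// is ahead of the host clock. Float parsing loses sub-microsecond precision,
// which is fine for a drift check.
func clockDelta(guestOutput string, host time.Time) (time.Duration, error) {
	secs, err := strconv.ParseFloat(strings.TrimSpace(guestOutput), 64)
	if err != nil {
		return 0, err
	}
	guest := time.Unix(0, int64(secs*float64(time.Second)))
	return guest.Sub(host), nil
}

func main() {
	host := time.Unix(0, 1763888282790385341) // host-side timestamp from the log
	delta, err := clockDelta("1763888282.872914256", host)
	if err != nil {
		panic(err)
	}
	tolerance := 2 * time.Second // assumed tolerance, for illustration only
	fmt.Printf("delta=%v within=%v\n", delta, math.Abs(float64(delta)) <= float64(tolerance))
}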
I1123 08:58:02.907375 62480 start.go:83] releasing machines lock for "default-k8s-diff-port-925051", held for 20.440202624s
I1123 08:58:02.910604 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.911104 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:02.911130 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.911758 62480 ssh_runner.go:195] Run: cat /version.json
I1123 08:58:02.911816 62480 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1123 08:58:02.915121 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.915430 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.915677 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:02.915710 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.915907 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:02.915942 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:02.915932 62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
I1123 08:58:02.916129 62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
I1123 08:58:03.020815 62480 ssh_runner.go:195] Run: systemctl --version
I1123 08:58:03.028066 62480 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1123 08:58:03.036089 62480 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1123 08:58:03.036168 62480 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1123 08:58:03.059461 62480 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1123 08:58:03.059497 62480 start.go:496] detecting cgroup driver to use...
I1123 08:58:03.059639 62480 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:58:03.085945 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1123 08:58:03.100188 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1123 08:58:03.114121 62480 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1123 08:58:03.114197 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1123 08:58:03.128502 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:58:03.141941 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1123 08:58:03.155742 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1123 08:58:03.170251 62480 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1123 08:58:03.185473 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1123 08:58:03.199212 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1123 08:58:03.212441 62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1123 08:58:03.225457 62480 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1123 08:58:03.237735 62480 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1123 08:58:03.237807 62480 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1123 08:58:03.251616 62480 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1123 08:58:03.264293 62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:58:03.431052 62480 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1123 08:58:03.484769 62480 start.go:496] detecting cgroup driver to use...
I1123 08:58:03.484887 62480 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1123 08:58:03.515067 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:58:03.538674 62480 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1123 08:58:03.566269 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1123 08:58:03.585483 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:58:03.603778 62480 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1123 08:58:03.640497 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1123 08:58:03.659085 62480 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1123 08:58:03.687162 62480 ssh_runner.go:195] Run: which cri-dockerd
I1123 08:58:03.694175 62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1123 08:58:03.712519 62480 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1123 08:58:03.741521 62480 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1123 08:58:03.916394 62480 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1123 08:58:04.069031 62480 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1123 08:58:04.069190 62480 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1123 08:58:04.093301 62480 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1123 08:58:04.109417 62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:58:04.272454 62480 ssh_runner.go:195] Run: sudo systemctl restart docker
I1123 08:58:04.931701 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1123 08:58:04.948944 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1123 08:58:04.971544 62480 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I1123 08:58:05.005474 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:58:05.031097 62480 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1123 08:58:05.200507 62480 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1123 08:58:05.394816 62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:58:05.619873 62480 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1123 08:58:05.666855 62480 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1123 08:58:05.685142 62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:58:05.848671 62480 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1123 08:58:05.996045 62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1123 08:58:06.018056 62480 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1123 08:58:06.018168 62480 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1123 08:58:06.026546 62480 start.go:564] Will wait 60s for crictl version
I1123 08:58:06.026630 62480 ssh_runner.go:195] Run: which crictl
I1123 08:58:06.032819 62480 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1123 08:58:06.084168 62480 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1123 08:58:06.084266 62480 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:58:06.126882 62480 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1123 08:58:06.163943 62480 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1123 08:58:06.168664 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:06.169284 62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
I1123 08:58:06.169324 62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
I1123 08:58:06.169553 62480 ssh_runner.go:195] Run: grep 192.168.83.1 host.minikube.internal$ /etc/hosts
I1123 08:58:06.176801 62480 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.83.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:58:06.201834 62480 kubeadm.go:884] updating cluster {Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1123 08:58:06.201979 62480 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1123 08:58:06.202051 62480 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:58:06.228393 62480 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1123 08:58:06.228418 62480 docker.go:621] Images already preloaded, skipping extraction
I1123 08:58:06.228478 62480 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1123 08:58:06.253832 62480 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1123 08:58:06.253872 62480 cache_images.go:86] Images are preloaded, skipping loading
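"Images are preloaded, skipping loading" is decided by listing what the runtime already has (`docker images --format {{.Repository}}:{{.Tag}}`, shown twice above) and checking that every image required for the requested Kubernetes version is present. A minimal Go sketch of that membership check, with an invented helper name:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// imagesPreloaded returns true when every required repo:tag already appears
// in `docker images` output, so no extraction or pull is needed.
func imagesPreloaded(required []string) (bool, error) {
	out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
	if err != nil {
		return false, err
	}
	have := map[string]bool{}
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		have[line] = true
	}
	for _, img := range required {
		if !have[img] {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	ok, err := imagesPreloaded([]string{
		"registry.k8s.io/kube-apiserver:v1.34.1",
		"registry.k8s.io/etcd:3.6.4-0",
		"registry.k8s.io/pause:3.10.1",
	})
	fmt.Println(ok, err)
}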
I1123 08:58:06.253886 62480 kubeadm.go:935] updating node { 192.168.83.137 8444 v1.34.1 docker true true} ...
I1123 08:58:06.254046 62480 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=default-k8s-diff-port-925051 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.83.137
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1123 08:58:06.254117 62480 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1123 08:58:06.333361 62480 cni.go:84] Creating CNI manager for ""
I1123 08:58:06.333408 62480 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:58:06.333432 62480 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1123 08:58:06.333457 62480 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.83.137 APIServerPort:8444 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:default-k8s-diff-port-925051 NodeName:default-k8s-diff-port-925051 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.83.137"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.83.137 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1123 08:58:06.333702 62480 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.83.137
  bindPort: 8444
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "default-k8s-diff-port-925051"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.83.137"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.83.137"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8444
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1123 08:58:06.333784 62480 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1123 08:58:06.356565 62480 binaries.go:51] Found k8s binaries, skipping transfer
I1123 08:58:06.356666 62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1123 08:58:06.376736 62480 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (329 bytes)
I1123 08:58:06.412797 62480 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1123 08:58:06.447785 62480 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2235 bytes)
I1123 08:58:06.486793 62480 ssh_runner.go:195] Run: grep 192.168.83.137 control-plane.minikube.internal$ /etc/hosts
I1123 08:58:06.494943 62480 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.83.137 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1123 08:58:06.522673 62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1123 08:58:06.760714 62480 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1123 08:58:06.816865 62480 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051 for IP: 192.168.83.137
I1123 08:58:06.817014 62480 certs.go:195] generating shared ca certs ...
I1123 08:58:06.817069 62480 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1123 08:58:06.817298 62480 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
I1123 08:58:06.817470 62480 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
I1123 08:58:06.817524 62480 certs.go:257] generating profile certs ...
I1123 08:58:06.817689 62480 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/client.key
I1123 08:58:06.817768 62480 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/apiserver.key.3e63079d
I1123 08:58:06.817847 62480 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/proxy-client.key
I1123 08:58:06.818039 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
W1123 08:58:06.818089 62480 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
I1123 08:58:06.818100 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
I1123 08:58:06.818136 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
I1123 08:58:06.818179 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
I1123 08:58:06.818209 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
I1123 08:58:06.818301 62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
I1123 08:58:06.819187 62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
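The CA being copied here, and the profile certs whose regeneration was skipped above, can be inspected with openssl if their validity is ever in doubt; the path is the one from this log:

  # print subject and validity window of the cluster CA on the node
  sudo openssl x509 -in /var/lib/minikube/certs/ca.crt -noout -subject -dates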
I1123 08:58:05.545959 62386 out.go:252] - Configuring RBAC rules ...
I1123 08:58:05.546132 62386 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1123 08:58:05.554804 62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1123 08:58:05.569723 62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1123 08:58:05.574634 62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1123 08:58:05.579213 62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1123 08:58:05.585176 62386 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1123 08:58:05.855390 62386 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1123 08:58:06.305498 62386 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
I1123 08:58:06.860572 62386 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
I1123 08:58:06.862132 62386 kubeadm.go:319]
I1123 08:58:06.862300 62386 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
I1123 08:58:06.862315 62386 kubeadm.go:319]
I1123 08:58:06.862459 62386 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
I1123 08:58:06.862488 62386 kubeadm.go:319]
I1123 08:58:06.862544 62386 kubeadm.go:319] mkdir -p $HOME/.kube
I1123 08:58:06.862628 62386 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1123 08:58:06.862700 62386 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1123 08:58:06.862710 62386 kubeadm.go:319]
I1123 08:58:06.862788 62386 kubeadm.go:319] Alternatively, if you are the root user, you can run:
I1123 08:58:06.862797 62386 kubeadm.go:319]
I1123 08:58:06.862866 62386 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf
I1123 08:58:06.862875 62386 kubeadm.go:319]
I1123 08:58:06.862984 62386 kubeadm.go:319] You should now deploy a pod network to the cluster.
I1123 08:58:06.863098 62386 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1123 08:58:06.863220 62386 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1123 08:58:06.863243 62386 kubeadm.go:319]
I1123 08:58:06.863353 62386 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
I1123 08:58:06.863463 62386 kubeadm.go:319] and service account keys on each node and then running the following as root:
I1123 08:58:06.863473 62386 kubeadm.go:319]
I1123 08:58:06.863589 62386 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token dgrodg.6ciokz1biodl2yci \
I1123 08:58:06.863736 62386 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:4395c5eefb8e424e96dd1759797a1c8f0fafb8cddc9a1a46a496a26ff5b9685a \
I1123 08:58:06.863769 62386 kubeadm.go:319] --control-plane
I1123 08:58:06.863778 62386 kubeadm.go:319]
I1123 08:58:06.863904 62386 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
I1123 08:58:06.863913 62386 kubeadm.go:319]
I1123 08:58:06.864056 62386 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token dgrodg.6ciokz1biodl2yci \
I1123 08:58:06.864229 62386 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:4395c5eefb8e424e96dd1759797a1c8f0fafb8cddc9a1a46a496a26ff5b9685a
I1123 08:58:06.865336 62386 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
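Per the kubeadm warning above, the kubelet unit is not enabled on this node. The fix kubeadm itself suggests, plus a way to mint a fresh join command later instead of reusing the token printed above, would be roughly:

  sudo systemctl enable kubelet.service
  # prints a new 'kubeadm join ...' line with a freshly created token
  sudo kubeadm token create --print-join-command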
I1123 08:58:06.865367 62386 cni.go:84] Creating CNI manager for ""
I1123 08:58:06.865396 62386 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1123 08:58:06.867294 62386 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1123 08:58:06.868866 62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1123 08:58:06.887652 62386 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I1123 08:58:06.925093 62386 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1123 08:58:06.925265 62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:58:06.925355 62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes newest-cni-078196 minikube.k8s.io/updated_at=2025_11_23T08_58_06_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=3e219827a5f064cf736992b79e59864301ece66e minikube.k8s.io/name=newest-cni-078196 minikube.k8s.io/primary=true
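The clusterrolebinding and node-label steps above can be verified from any kubeconfig that reaches this cluster; a sketch, assuming kubectl points at the newest-cni-078196 context:

  kubectl get clusterrolebinding minikube-rbac
  kubectl get node newest-cni-078196 --show-labels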
I1123 08:58:07.139216 62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:58:07.139367 62386 ops.go:34] apiserver oom_adj: -16
I1123 08:58:07.639626 62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:58:08.140356 62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1123 08:58:08.639822 62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
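The repeated 'get sa default' calls above look like minikube polling until the default ServiceAccount exists; the equivalent one-off check by hand is simply:

  kubectl -n default get serviceaccount default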
==> Docker <==
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.403847294Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.530278754Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.530380987Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Nov 23 08:57:20 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:20Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.541222738Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.541313635Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.544639412Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.544665809Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.895558802Z" level=info msg="ignoring event" container=5858e2fd1e0f544e020a845d1e9aa15e86c2117c0ebff9dfe1b6f4d96f844434 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Nov 23 08:57:21 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/f70de02d77443d2041cfe03c25cb36b6f758dd4e678353419ea55ac106e8b68a/resolv.conf as [nameserver 10.96.0.10 search kubernetes-dashboard.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Nov 23 08:57:32 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:32.990740143Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.076597693Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.076828182Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Nov 23 08:57:33 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:33Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.550212350Z" level=info msg="ignoring event" container=1f0a2f0aefa9b826288b8b721a751f41c880f8daa0983c581ae8b039871db1a1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Nov 23 08:58:07 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:07Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Nov 23 08:58:07 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:07Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-lp6jk_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"9a49ebff42d5eef5c3e23db2e1ab337396080dea6c13220062ba5e0e48a95cc8\""
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.760065184Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.863488316Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.863610785Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Nov 23 08:58:08 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:08Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.897944813Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.899313923Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.914470304Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.914503647Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
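The Docker section above is unit-level daemon logging; on the node the same stream can be pulled from systemd for both the docker and cri-docker units (unit names as shipped in minikube's guest image, an assumption here):

  sudo journalctl -u docker -u cri-docker --no-pager -n 200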
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
371de4a468901 6e38f40d628db 3 seconds ago Running storage-provisioner 2 a97e7e7100c3a storage-provisioner kube-system
57ebcdb97431d kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93 51 seconds ago Running kubernetes-dashboard 0 644b3c0a17fe8 kubernetes-dashboard-855c9754f9-zh9mv kubernetes-dashboard
58768e42678e9 56cc512116c8f 53 seconds ago Running busybox 1 c39a5f42630b0 busybox default
f7e183883855c 52546a367cc9e 53 seconds ago Running coredns 1 86281d14c8f1e coredns-66bc5c9577-nj6pk kube-system
1f0a2f0aefa9b 6e38f40d628db About a minute ago Exited storage-provisioner 1 a97e7e7100c3a storage-provisioner kube-system
8c0537e27a6fb fc25172553d79 About a minute ago Running kube-proxy 1 dd983c999b8f4 kube-proxy-wlb9w kube-system
8deb34aee6ea1 5f1f5298c888d About a minute ago Running etcd 1 ccce046e98c9b etcd-no-preload-019660 kube-system
1a4750ff7e8cb c80c8dbafe7dd About a minute ago Running kube-controller-manager 1 e18e6fb700516 kube-controller-manager-no-preload-019660 kube-system
6929fc4394d1d c3994bc696102 About a minute ago Running kube-apiserver 1 b493d9303993d kube-apiserver-no-preload-019660 kube-system
266be5a40ca65 7dd6aaa1717ab About a minute ago Running kube-scheduler 1 a1f3f18719102 kube-scheduler-no-preload-019660 kube-system
7e459e5ac3043 gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 2 minutes ago Exited busybox 0 c0e79a536f316 busybox default
b5d2ec6064039 52546a367cc9e 2 minutes ago Exited coredns 0 92a72987832f3 coredns-66bc5c9577-nj6pk kube-system
4aea324009fdd fc25172553d79 2 minutes ago Exited kube-proxy 0 adcf7215f30c5 kube-proxy-wlb9w kube-system
57bb06d26ab69 7dd6aaa1717ab 2 minutes ago Exited kube-scheduler 0 0e3f3ba5c2b8c kube-scheduler-no-preload-019660 kube-system
78433f5a1dee5 5f1f5298c888d 2 minutes ago Exited etcd 0 c90dfb42b9b72 etcd-no-preload-019660 kube-system
e0963762dabe6 c80c8dbafe7dd 2 minutes ago Exited kube-controller-manager 0 796e38a439eca kube-controller-manager-no-preload-019660 kube-system
51985d9c2b5e4 c3994bc696102 2 minutes ago Exited kube-apiserver 0 8ec1927039422 kube-apiserver-no-preload-019660 kube-system
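The container-status table above is a CRI-level listing; with the cri-dockerd socket from the config earlier it can be reproduced roughly with:

  sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock ps -a
  # or directly against Docker, since that is the underlying runtime here
  sudo docker ps -a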
==> coredns [b5d2ec606403] <==
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] Reloading
[INFO] plugin/reload: Running configuration SHA512 = 6e77f21cd6946b87ec86c565e2060aa5d23c02882cb22fd7a321b5e8cd0c8bdafe21968fcff406405707b988b753da21ecd190fe02329f1b569bfa74920cc0fa
[INFO] Reloading complete
[INFO] 127.0.0.1:42110 - 29445 "HINFO IN 9017480915883545082.4400091200596631812. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.103448715s
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [f7e183883855] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 6e77f21cd6946b87ec86c565e2060aa5d23c02882cb22fd7a321b5e8cd0c8bdafe21968fcff406405707b988b753da21ecd190fe02329f1b569bfa74920cc0fa
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:55083 - 4317 "HINFO IN 4704850718228764652.4547352497864188913. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.118220473s
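Both coredns blocks above are per-container logs keyed by the Docker container ID in the header; they can be re-fetched either by that ID or via kubectl, assuming the stock kubeadm CoreDNS labels:

  sudo docker logs f7e183883855
  kubectl -n kube-system logs -l k8s-app=kube-dns --tail=100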
==> describe nodes <==
Name: no-preload-019660
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=no-preload-019660
kubernetes.io/os=linux
minikube.k8s.io/commit=3e219827a5f064cf736992b79e59864301ece66e
minikube.k8s.io/name=no-preload-019660
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_23T08_55_22_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 23 Nov 2025 08:55:18 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: no-preload-019660
AcquireTime: <unset>
RenewTime: Sun, 23 Nov 2025 08:58:07 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sun, 23 Nov 2025 08:58:07 +0000 Sun, 23 Nov 2025 08:55:14 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sun, 23 Nov 2025 08:58:07 +0000 Sun, 23 Nov 2025 08:55:14 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sun, 23 Nov 2025 08:58:07 +0000 Sun, 23 Nov 2025 08:55:14 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sun, 23 Nov 2025 08:58:07 +0000 Sun, 23 Nov 2025 08:57:11 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.50.40
Hostname: no-preload-019660
Capacity:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
System Info:
Machine ID: 5db77235f15f4a52ad7c561433b2bbe5
System UUID: 5db77235-f15f-4a52-ad7c-561433b2bbe5
Boot ID: 7c4938cf-e087-4d48-94a0-7660c53890e7
Kernel Version: 6.6.95
OS Image: Buildroot 2025.02
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.5.1
Kubelet Version: v1.34.1
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m7s
kube-system coredns-66bc5c9577-nj6pk 100m (5%) 0 (0%) 70Mi (2%) 170Mi (5%) 2m44s
kube-system etcd-no-preload-019660 100m (5%) 0 (0%) 100Mi (3%) 0 (0%) 2m50s
kube-system kube-apiserver-no-preload-019660 250m (12%) 0 (0%) 0 (0%) 0 (0%) 2m50s
kube-system kube-controller-manager-no-preload-019660 200m (10%) 0 (0%) 0 (0%) 0 (0%) 2m50s
kube-system kube-proxy-wlb9w 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m45s
kube-system kube-scheduler-no-preload-019660 100m (5%) 0 (0%) 0 (0%) 0 (0%) 2m50s
kube-system metrics-server-746fcd58dc-tg8q5 100m (5%) 0 (0%) 200Mi (6%) 0 (0%) 117s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m42s
kubernetes-dashboard dashboard-metrics-scraper-6ffb444bf9-4965t 0 (0%) 0 (0%) 0 (0%) 0 (0%) 65s
kubernetes-dashboard kubernetes-dashboard-855c9754f9-zh9mv 0 (0%) 0 (0%) 0 (0%) 0 (0%) 65s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 0 (0%)
memory 370Mi (12%) 170Mi (5%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 2m41s kube-proxy
Normal Starting 67s kube-proxy
Normal NodeHasSufficientMemory 2m59s (x8 over 2m59s) kubelet Node no-preload-019660 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m59s (x8 over 2m59s) kubelet Node no-preload-019660 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 2m59s (x7 over 2m59s) kubelet Node no-preload-019660 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 2m59s kubelet Updated Node Allocatable limit across pods
Normal Starting 2m50s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 2m50s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 2m50s kubelet Node no-preload-019660 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m50s kubelet Node no-preload-019660 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 2m50s kubelet Node no-preload-019660 status is now: NodeHasSufficientPID
Normal NodeReady 2m46s kubelet Node no-preload-019660 status is now: NodeReady
Normal RegisteredNode 2m45s node-controller Node no-preload-019660 event: Registered Node no-preload-019660 in Controller
Normal Starting 77s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 77s kubelet Updated Node Allocatable limit across pods
Normal NodeHasNoDiskPressure 76s (x8 over 77s) kubelet Node no-preload-019660 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 76s (x7 over 77s) kubelet Node no-preload-019660 status is now: NodeHasSufficientPID
Normal NodeHasSufficientMemory 76s (x8 over 77s) kubelet Node no-preload-019660 status is now: NodeHasSufficientMemory
Warning Rebooted 71s kubelet Node no-preload-019660 has been rebooted, boot id: 7c4938cf-e087-4d48-94a0-7660c53890e7
Normal RegisteredNode 68s node-controller Node no-preload-019660 event: Registered Node no-preload-019660 in Controller
Normal Starting 5s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 5s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 5s kubelet Node no-preload-019660 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 5s kubelet Node no-preload-019660 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 5s kubelet Node no-preload-019660 status is now: NodeHasSufficientPID
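The node summary above is what 'kubectl describe node' prints; to reproduce it against this profile from the test host, something along the lines of:

  out/minikube-linux-amd64 kubectl -p no-preload-019660 -- describe node no-preload-019660
  # or with plain kubectl, using the context minikube creates for the profile
  kubectl --context no-preload-019660 describe node no-preload-019660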
==> dmesg <==
[Nov23 08:56] Booted with the nomodeset parameter. Only the system framebuffer will be available
[ +0.000011] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.001555] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +0.004890] (rpcbind)[121]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
[ +0.922269] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
[ +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
[ +0.000002] NFSD: Unable to initialize client recovery tracking! (-2)
[ +0.557715] kauditd_printk_skb: 29 callbacks suppressed
[ +0.102404] kauditd_printk_skb: 421 callbacks suppressed
[Nov23 08:57] kauditd_printk_skb: 165 callbacks suppressed
[ +4.416704] kauditd_printk_skb: 134 callbacks suppressed
[ +0.028951] kauditd_printk_skb: 144 callbacks suppressed
[ +1.212600] kauditd_printk_skb: 93 callbacks suppressed
[ +0.188677] kauditd_printk_skb: 78 callbacks suppressed
[Nov23 08:58] kauditd_printk_skb: 35 callbacks suppressed
==> etcd [78433f5a1dee] <==
{"level":"info","ts":"2025-11-23T08:55:27.960210Z","caller":"traceutil/trace.go:172","msg":"trace[1913795349] transaction","detail":"{read_only:false; response_revision:359; number_of_response:1; }","duration":"132.125474ms","start":"2025-11-23T08:55:27.828070Z","end":"2025-11-23T08:55:27.960197Z","steps":["trace[1913795349] 'process raft request' (duration: 130.470237ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-23T08:55:27.961326Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"115.093447ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/service-cidrs-controller\" limit:1 ","response":"range_response_count:1 size:214"}
{"level":"info","ts":"2025-11-23T08:55:27.961420Z","caller":"traceutil/trace.go:172","msg":"trace[1979015044] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/service-cidrs-controller; range_end:; response_count:1; response_revision:360; }","duration":"115.232691ms","start":"2025-11-23T08:55:27.846179Z","end":"2025-11-23T08:55:27.961412Z","steps":["trace[1979015044] 'agreement among raft nodes before linearized reading' (duration: 114.979531ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:55:27.964671Z","caller":"traceutil/trace.go:172","msg":"trace[1629415560] transaction","detail":"{read_only:false; response_revision:361; number_of_response:1; }","duration":"113.511815ms","start":"2025-11-23T08:55:27.851149Z","end":"2025-11-23T08:55:27.964661Z","steps":["trace[1629415560] 'process raft request' (duration: 111.933576ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:55:27.965851Z","caller":"traceutil/trace.go:172","msg":"trace[339398896] transaction","detail":"{read_only:false; response_revision:362; number_of_response:1; }","duration":"103.77975ms","start":"2025-11-23T08:55:27.862061Z","end":"2025-11-23T08:55:27.965841Z","steps":["trace[339398896] 'process raft request' (duration: 102.247209ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:55:52.232221Z","caller":"traceutil/trace.go:172","msg":"trace[991594023] transaction","detail":"{read_only:false; response_revision:463; number_of_response:1; }","duration":"138.295615ms","start":"2025-11-23T08:55:52.093898Z","end":"2025-11-23T08:55:52.232193Z","steps":["trace[991594023] 'process raft request' (duration: 138.148011ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:55:53.110050Z","caller":"traceutil/trace.go:172","msg":"trace[1408655835] transaction","detail":"{read_only:false; response_revision:464; number_of_response:1; }","duration":"111.465311ms","start":"2025-11-23T08:55:52.998570Z","end":"2025-11-23T08:55:53.110036Z","steps":["trace[1408655835] 'process raft request' (duration: 111.386468ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:56:16.343294Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
{"level":"info","ts":"2025-11-23T08:56:16.343638Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"no-preload-019660","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.50.40:2380"],"advertise-client-urls":["https://192.168.50.40:2379"]}
{"level":"error","ts":"2025-11-23T08:56:16.344971Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-11-23T08:56:23.350843Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
{"level":"error","ts":"2025-11-23T08:56:23.350926Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-11-23T08:56:23.350948Z","caller":"etcdserver/server.go:1281","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"113a167c41258c81","current-leader-member-id":"113a167c41258c81"}
{"level":"info","ts":"2025-11-23T08:56:23.351067Z","caller":"etcdserver/server.go:2342","msg":"server has stopped; stopping storage version's monitor"}
{"level":"info","ts":"2025-11-23T08:56:23.351076Z","caller":"etcdserver/server.go:2319","msg":"server has stopped; stopping cluster version's monitor"}
{"level":"warn","ts":"2025-11-23T08:56:23.353233Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"warn","ts":"2025-11-23T08:56:23.353335Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
{"level":"error","ts":"2025-11-23T08:56:23.353344Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"warn","ts":"2025-11-23T08:56:23.353381Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.50.40:2379: use of closed network connection"}
{"level":"warn","ts":"2025-11-23T08:56:23.353419Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.50.40:2379: use of closed network connection"}
{"level":"error","ts":"2025-11-23T08:56:23.353428Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.50.40:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-11-23T08:56:23.359157Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.50.40:2380"}
{"level":"error","ts":"2025-11-23T08:56:23.359253Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.50.40:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
{"level":"info","ts":"2025-11-23T08:56:23.359488Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.50.40:2380"}
{"level":"info","ts":"2025-11-23T08:56:23.359540Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"no-preload-019660","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.50.40:2380"],"advertise-client-urls":["https://192.168.50.40:2379"]}
==> etcd [8deb34aee6ea] <==
{"level":"warn","ts":"2025-11-23T08:57:00.099710Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44330","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.113877Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44336","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.136374Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44356","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.145346Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44368","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.154857Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44394","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.171909Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44414","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.185801Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44422","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.191640Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44442","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.202370Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44456","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.212078Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44464","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.224299Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44490","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.239703Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44498","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.248343Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44522","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.259201Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44546","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.280884Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44576","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.303755Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44586","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.322303Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44610","server-name":"","error":"EOF"}
{"level":"warn","ts":"2025-11-23T08:57:00.379317Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44628","server-name":"","error":"EOF"}
{"level":"info","ts":"2025-11-23T08:57:16.914297Z","caller":"traceutil/trace.go:172","msg":"trace[282693566] transaction","detail":"{read_only:false; response_revision:710; number_of_response:1; }","duration":"165.899912ms","start":"2025-11-23T08:57:16.748378Z","end":"2025-11-23T08:57:16.914278Z","steps":["trace[282693566] 'process raft request' (duration: 165.731904ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:57:17.891916Z","caller":"traceutil/trace.go:172","msg":"trace[845827594] linearizableReadLoop","detail":"{readStateIndex:756; appliedIndex:756; }","duration":"162.635779ms","start":"2025-11-23T08:57:17.729260Z","end":"2025-11-23T08:57:17.891896Z","steps":["trace[845827594] 'read index received' (duration: 162.630099ms)","trace[845827594] 'applied index is now lower than readState.Index' (duration: 4.7µs)"],"step_count":2}
{"level":"warn","ts":"2025-11-23T08:57:17.892195Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"162.847621ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-23T08:57:17.892577Z","caller":"traceutil/trace.go:172","msg":"trace[1595377469] transaction","detail":"{read_only:false; response_revision:712; number_of_response:1; }","duration":"262.918033ms","start":"2025-11-23T08:57:17.629632Z","end":"2025-11-23T08:57:17.892550Z","steps":["trace[1595377469] 'process raft request' (duration: 262.820051ms)"],"step_count":1}
{"level":"info","ts":"2025-11-23T08:57:17.892238Z","caller":"traceutil/trace.go:172","msg":"trace[1998076635] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:711; }","duration":"162.976ms","start":"2025-11-23T08:57:17.729254Z","end":"2025-11-23T08:57:17.892230Z","steps":["trace[1998076635] 'agreement among raft nodes before linearized reading' (duration: 162.824778ms)"],"step_count":1}
{"level":"warn","ts":"2025-11-23T08:57:17.894716Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"130.045976ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2025-11-23T08:57:17.894762Z","caller":"traceutil/trace.go:172","msg":"trace[1496763416] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:712; }","duration":"130.105624ms","start":"2025-11-23T08:57:17.764650Z","end":"2025-11-23T08:57:17.894756Z","steps":["trace[1496763416] 'agreement among raft nodes before linearized reading' (duration: 130.023549ms)"],"step_count":1}
==> kernel <==
08:58:12 up 1 min, 0 users, load average: 1.58, 0.55, 0.20
Linux no-preload-019660 6.6.95 #1 SMP PREEMPT_DYNAMIC Wed Nov 19 01:10:03 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2025.02"
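The three kernel lines above correspond to uptime, uname and the os-release pretty name; on the node:

  uptime
  uname -a
  grep PRETTY_NAME /etc/os-release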
==> kube-apiserver [51985d9c2b5e] <==
W1123 08:56:25.707408 1 logging.go:55] [core] [Channel #135 SubChannel #137]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.731493 1 logging.go:55] [core] [Channel #63 SubChannel #65]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.801488 1 logging.go:55] [core] [Channel #199 SubChannel #201]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.835630 1 logging.go:55] [core] [Channel #147 SubChannel #149]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.837271 1 logging.go:55] [core] [Channel #251 SubChannel #253]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.885167 1 logging.go:55] [core] [Channel #47 SubChannel #49]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.919480 1 logging.go:55] [core] [Channel #139 SubChannel #141]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.953337 1 logging.go:55] [core] [Channel #91 SubChannel #93]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:25.992450 1 logging.go:55] [core] [Channel #191 SubChannel #193]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.001050 1 logging.go:55] [core] [Channel #175 SubChannel #177]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.027017 1 logging.go:55] [core] [Channel #115 SubChannel #117]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.043092 1 logging.go:55] [core] [Channel #159 SubChannel #161]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.075821 1 logging.go:55] [core] [Channel #83 SubChannel #85]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.087192 1 logging.go:55] [core] [Channel #67 SubChannel #69]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.108299 1 logging.go:55] [core] [Channel #207 SubChannel #209]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.143125 1 logging.go:55] [core] [Channel #227 SubChannel #229]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.143847 1 logging.go:55] [core] [Channel #27 SubChannel #29]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.168146 1 logging.go:55] [core] [Channel #31 SubChannel #33]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.194296 1 logging.go:55] [core] [Channel #55 SubChannel #57]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.217089 1 logging.go:55] [core] [Channel #143 SubChannel #145]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.284415 1 logging.go:55] [core] [Channel #39 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.304057 1 logging.go:55] [core] [Channel #127 SubChannel #129]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.351096 1 logging.go:55] [core] [Channel #151 SubChannel #153]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.358315 1 logging.go:55] [core] [Channel #107 SubChannel #109]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1123 08:56:26.398513 1 logging.go:55] [core] [Channel #179 SubChannel #181]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
==> kube-apiserver [6929fc4394d1] <==
W1123 08:57:02.240589 1 handler_proxy.go:99] no RequestInfo found in the context
E1123 08:57:02.241169 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1123 08:57:02.242304 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
I1123 08:57:03.447397 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1123 08:57:03.566737 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I1123 08:57:03.633482 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1123 08:57:03.665173 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1123 08:57:04.456742 1 controller.go:667] quota admission added evaluator for: endpoints
I1123 08:57:04.822296 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1123 08:57:04.922886 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I1123 08:57:06.855489 1 controller.go:667] quota admission added evaluator for: namespaces
I1123 08:57:07.352680 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.100.252.132"}
I1123 08:57:07.386303 1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.100.154.160"}
W1123 08:58:06.568683 1 handler_proxy.go:99] no RequestInfo found in the context
E1123 08:58:06.568889 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1123 08:58:06.569001 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
W1123 08:58:06.583847 1 handler_proxy.go:99] no RequestInfo found in the context
E1123 08:58:06.587393 1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
I1123 08:58:06.587452 1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
==> kube-controller-manager [1a4750ff7e8c] <==
I1123 08:57:04.478449 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I1123 08:57:04.488570 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1123 08:57:04.494373 1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
I1123 08:57:04.481772 1 shared_informer.go:356] "Caches are synced" controller="PV protection"
I1123 08:57:04.502443 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
I1123 08:57:04.502540 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I1123 08:57:04.506670 1 shared_informer.go:356] "Caches are synced" controller="ReplicaSet"
I1123 08:57:04.510647 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I1123 08:57:04.566367 1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
I1123 08:57:04.591835 1 shared_informer.go:349] "Waiting for caches to sync" controller="garbage collector"
I1123 08:57:04.750206 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1123 08:57:04.750262 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1123 08:57:04.750270 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1123 08:57:04.793332 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
E1123 08:57:07.066560 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.102507 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.134848 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.147364 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.152054 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.176406 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.177162 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1123 08:57:07.185205 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
I1123 08:57:14.479438 1 node_lifecycle_controller.go:1044] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
I1123 08:58:06.668391 1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
E1123 08:58:06.670861 1 resource_quota_controller.go:446] "Unhandled Error" err="unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: stale GroupVersion discovery: metrics.k8s.io/v1beta1" logger="UnhandledError"
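The burst of serviceaccount "kubernetes-dashboard" not found errors above is an ordering race at addon startup: the dashboard Deployments and the ServiceAccount are applied at roughly the same time, and the ReplicaSet controller simply retries pod creation until the ServiceAccount exists (the kubernetes-dashboard log further down shows the pod did come up). Assuming the same context, the settled state can be confirmed with:

    kubectl --context no-preload-019660 -n kubernetes-dashboard get serviceaccounts,replicasets,pods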
==> kube-controller-manager [e0963762dabe] <==
I1123 08:55:27.305673 1 shared_informer.go:356] "Caches are synced" controller="VAC protection"
I1123 08:55:27.305856 1 shared_informer.go:356] "Caches are synced" controller="disruption"
I1123 08:55:27.305946 1 shared_informer.go:356] "Caches are synced" controller="namespace"
I1123 08:55:27.307430 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
I1123 08:55:27.307491 1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
I1123 08:55:27.307769 1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
I1123 08:55:27.308002 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1123 08:55:27.311526 1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
I1123 08:55:27.320061 1 shared_informer.go:356] "Caches are synced" controller="node"
I1123 08:55:27.320143 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I1123 08:55:27.320176 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I1123 08:55:27.320181 1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
I1123 08:55:27.320186 1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
I1123 08:55:27.323691 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I1123 08:55:27.332119 1 shared_informer.go:356] "Caches are synced" controller="taint"
I1123 08:55:27.332230 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I1123 08:55:27.332307 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="no-preload-019660"
I1123 08:55:27.332344 1 node_lifecycle_controller.go:1067] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal"
I1123 08:55:27.353034 1 shared_informer.go:356] "Caches are synced" controller="validatingadmissionpolicy-status"
I1123 08:55:27.353188 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1123 08:55:27.353234 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1123 08:55:27.353253 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1123 08:55:27.355630 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I1123 08:55:27.356002 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1123 08:55:27.484870 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="no-preload-019660" podCIDRs=["10.244.0.0/24"]
==> kube-proxy [4aea324009fd] <==
I1123 08:55:29.781436 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1123 08:55:29.882143 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1123 08:55:29.882176 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.50.40"]
E1123 08:55:29.882244 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1123 08:55:30.206875 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1123 08:55:30.209951 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1123 08:55:30.210016 1 server_linux.go:132] "Using iptables Proxier"
I1123 08:55:30.389394 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1123 08:55:30.398584 1 server.go:527] "Version info" version="v1.34.1"
I1123 08:55:30.411854 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 08:55:30.436371 1 config.go:106] "Starting endpoint slice config controller"
I1123 08:55:30.436400 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1123 08:55:30.436421 1 config.go:403] "Starting serviceCIDR config controller"
I1123 08:55:30.436428 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1123 08:55:30.441802 1 config.go:200] "Starting service config controller"
I1123 08:55:30.441827 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1123 08:55:30.456879 1 config.go:309] "Starting node config controller"
I1123 08:55:30.457052 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1123 08:55:30.457180 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1123 08:55:30.537976 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1123 08:55:30.542627 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1123 08:55:30.553889 1 shared_informer.go:356] "Caches are synced" controller="service config"
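Two warnings in this kube-proxy section are benign here: the ip6tables failure only means the ip6table_nat kernel module is not loaded in the VM, so kube-proxy falls back to IPv4 single-stack, and the nodePortAddresses message describes the documented default behavior. If IPv6 support actually mattered, a first check inside the guest would be along these lines (a sketch, assuming SSH access through the same profile):

    out/minikube-linux-amd64 -p no-preload-019660 ssh "sudo modprobe ip6table_nat"
    out/minikube-linux-amd64 -p no-preload-019660 ssh "sudo ip6tables -t nat -L POSTROUTING"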
==> kube-proxy [8c0537e27a6f] <==
I1123 08:57:04.109885 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1123 08:57:04.212001 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1123 08:57:04.212377 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.50.40"]
E1123 08:57:04.212492 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1123 08:57:04.308881 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1123 08:57:04.309495 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1123 08:57:04.309923 1 server_linux.go:132] "Using iptables Proxier"
I1123 08:57:04.335219 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1123 08:57:04.338659 1 server.go:527] "Version info" version="v1.34.1"
I1123 08:57:04.339118 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 08:57:04.356711 1 config.go:200] "Starting service config controller"
I1123 08:57:04.358780 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1123 08:57:04.357281 1 config.go:403] "Starting serviceCIDR config controller"
I1123 08:57:04.360751 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1123 08:57:04.359340 1 config.go:309] "Starting node config controller"
I1123 08:57:04.361083 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1123 08:57:04.361217 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1123 08:57:04.357261 1 config.go:106] "Starting endpoint slice config controller"
I1123 08:57:04.361454 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1123 08:57:04.461112 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1123 08:57:04.461168 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1123 08:57:04.466392 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
==> kube-scheduler [266be5a40ca6] <==
I1123 08:56:59.176913 1 serving.go:386] Generated self-signed cert in-memory
W1123 08:57:01.157665 1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W1123 08:57:01.157869 1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W1123 08:57:01.157944 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
W1123 08:57:01.158050 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1123 08:57:01.217478 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.1"
I1123 08:57:01.217604 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1123 08:57:01.228584 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1123 08:57:01.229023 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1123 08:57:01.231067 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1123 08:57:01.231467 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1123 08:57:01.329575 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
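The requestheader_controller and authentication warnings above are usually transient: this scheduler instance started while authorization was still settling right after the apiserver restart, and the cache-sync line at 08:57:01.329 shows it did read the client-ca material shortly afterwards. If the warning persisted, a binding along the lines the log itself suggests (with the subject adjusted to the scheduler's user rather than a service account, and an illustrative binding name) would grant the read access:

    kubectl --context no-preload-019660 -n kube-system get configmap extension-apiserver-authentication
    kubectl --context no-preload-019660 -n kube-system create rolebinding sched-extension-apiserver-authn-reader \
      --role=extension-apiserver-authentication-reader --user=system:kube-scheduler

Nothing in this run actually required that step; it is only what the warning points at.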
==> kube-scheduler [57bb06d26ab6] <==
E1123 08:55:19.477132 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1123 08:55:19.476999 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E1123 08:55:19.477074 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1123 08:55:19.478217 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1123 08:55:19.478832 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E1123 08:55:19.479554 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E1123 08:55:19.480141 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
E1123 08:55:19.480165 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1123 08:55:19.480360 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1123 08:55:19.480372 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E1123 08:55:19.480530 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1123 08:55:19.480623 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E1123 08:55:19.481197 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1123 08:55:19.482165 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1123 08:55:20.289908 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1123 08:55:20.337370 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1123 08:55:20.366302 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1123 08:55:20.425798 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1123 08:55:20.483335 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
E1123 08:55:20.494282 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
I1123 08:55:23.055993 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1123 08:56:16.316839 1 secure_serving.go:259] Stopped listening on 127.0.0.1:10259
I1123 08:56:16.317595 1 server.go:263] "[graceful-termination] secure server has stopped listening"
I1123 08:56:16.317742 1 server.go:265] "[graceful-termination] secure server is exiting"
E1123 08:56:16.317790 1 run.go:72] "command failed" err="finished without leader elect"
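The "Failed to watch ... is forbidden" burst at 08:55:19-20 is the same startup-ordering pattern: this scheduler came up before the default RBAC roles had been reconciled, the reflectors retried, and the cache-sync line at 08:55:23 shows recovery. The final four lines are the graceful shutdown when the node was stopped for the restart phase of the test; "finished without leader elect" is the normal exit path there even though it is logged at error level. A spot check of the permissions once the cluster is back up, assuming the same context:

    kubectl --context no-preload-019660 auth can-i list persistentvolumes --as=system:kube-scheduler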
==> kubelet <==
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220241 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-k8s-certs\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220309 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-flexvolume-dir\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220345 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-kubeconfig\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220366 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-usr-share-ca-certificates\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220392 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-ca-certs\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220412 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-usr-share-ca-certificates\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220431 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-ca-certs\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220451 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-k8s-certs\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220473 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/0bd61e39ef27cab83cc049d81d34254c-kubeconfig\") pod \"kube-scheduler-no-preload-019660\" (UID: \"0bd61e39ef27cab83cc049d81d34254c\") " pod="kube-system/kube-scheduler-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.223516 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/4765da838683051a5b8aa163156bdc40-etcd-certs\") pod \"etcd-no-preload-019660\" (UID: \"4765da838683051a5b8aa163156bdc40\") " pod="kube-system/etcd-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.224048 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/4765da838683051a5b8aa163156bdc40-etcd-data\") pod \"etcd-no-preload-019660\" (UID: \"4765da838683051a5b8aa163156bdc40\") " pod="kube-system/etcd-no-preload-019660"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.281626 4357 apiserver.go:52] "Watching apiserver"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.354823 4357 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428002 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/92a336c6-9d96-4484-8903-9542086c298e-tmp\") pod \"storage-provisioner\" (UID: \"92a336c6-9d96-4484-8903-9542086c298e\") " pod="kube-system/storage-provisioner"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428072 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/fb442967-1590-4196-a0b8-1ed0320182cd-xtables-lock\") pod \"kube-proxy-wlb9w\" (UID: \"fb442967-1590-4196-a0b8-1ed0320182cd\") " pod="kube-system/kube-proxy-wlb9w"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428146 4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fb442967-1590-4196-a0b8-1ed0320182cd-lib-modules\") pod \"kube-proxy-wlb9w\" (UID: \"fb442967-1590-4196-a0b8-1ed0320182cd\") " pod="kube-system/kube-proxy-wlb9w"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.612741 4357 scope.go:117] "RemoveContainer" containerID="1f0a2f0aefa9b826288b8b721a751f41c880f8daa0983c581ae8b039871db1a1"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.874748 4357 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.877286 4357 kuberuntime_image.go:43] "Failed to pull image" err="Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.878430 4357 kuberuntime_manager.go:1449] "Unhandled Error" err="container dashboard-metrics-scraper start failed in pod dashboard-metrics-scraper-6ffb444bf9-4965t_kubernetes-dashboard(d4a9e601-4647-40d6-a5d8-db1e8e067281): ErrImagePull: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" logger="UnhandledError"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.878855 4357 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ErrImagePull: \"Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9-4965t" podUID="d4a9e601-4647-40d6-a5d8-db1e8e067281"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.918928 4357 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.919810 4357 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.921110 4357 kuberuntime_manager.go:1449] "Unhandled Error" err="container metrics-server start failed in pod metrics-server-746fcd58dc-tg8q5_kube-system(fb0df7df-58f1-4b52-8193-e19d66dd95bf): ErrImagePull: Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" logger="UnhandledError"
Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.921171 4357 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"metrics-server\" with ErrImagePull: \"Error response from daemon: Get \\\"https://fake.domain/v2/\\\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host\"" pod="kube-system/metrics-server-746fcd58dc-tg8q5" podUID="fb0df7df-58f1-4b52-8193-e19d66dd95bf"
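Neither image-pull failure above is a regression in this run: registry.k8s.io/echoserver:1.4 is published only as a Docker schema 1 manifest, which this Docker version has dropped (hence the dashboard-metrics-scraper ErrImagePull), and fake.domain/registry.k8s.io/echoserver:1.4 appears to be the intentionally unresolvable image the test wires into the metrics-server deployment, so that pull is expected to fail. A quick way to see the resulting pod and event state without the full post-mortem, assuming the same context:

    kubectl --context no-preload-019660 get pods -A --field-selector=status.phase!=Running
    kubectl --context no-preload-019660 get events -A --field-selector=reason=Failed --sort-by=.lastTimestamp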
==> kubernetes-dashboard [57ebcdb97431] <==
2025/11/23 08:57:20 Starting overwatch
2025/11/23 08:57:20 Using namespace: kubernetes-dashboard
2025/11/23 08:57:20 Using in-cluster config to connect to apiserver
2025/11/23 08:57:20 Using secret token for csrf signing
2025/11/23 08:57:20 Initializing csrf token from kubernetes-dashboard-csrf secret
2025/11/23 08:57:20 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
2025/11/23 08:57:20 Successful initial request to the apiserver, version: v1.34.1
2025/11/23 08:57:20 Generating JWE encryption key
2025/11/23 08:57:20 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
2025/11/23 08:57:20 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
2025/11/23 08:57:21 Initializing JWE encryption key from synchronized object
2025/11/23 08:57:21 Creating in-cluster Sidecar client
2025/11/23 08:57:21 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2025/11/23 08:57:21 Serving insecurely on HTTP port: 9090
2025/11/23 08:58:06 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
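The two metric client health check failures bracket the pause window and most likely share a cause with the kubelet errors above: the dashboard-metrics-scraper Service has no ready endpoints while its pod is stuck in ErrImagePull, so the API server's service proxy answers "service unavailable" and the dashboard retries every 30 seconds while serving without metrics. Assuming the same context:

    kubectl --context no-preload-019660 -n kubernetes-dashboard get endpoints dashboard-metrics-scraper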
==> storage-provisioner [1f0a2f0aefa9] <==
I1123 08:57:03.436717 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F1123 08:57:33.518183 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
==> storage-provisioner [371de4a46890] <==
I1123 08:58:09.007550 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1123 08:58:09.042381 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1123 08:58:09.044488 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
W1123 08:58:09.057366 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
W1123 08:58:12.516323 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
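The earlier storage-provisioner instance (1f0a2f0aefa9) exited because its first call to the in-cluster apiserver at 10.96.0.1:443 hit the 30-second client timeout, most plausibly because it dialed before kube-proxy had reprogrammed the Service rules after the restart; kubelet then recreated it as the instance above, which initializes and starts leader election normally. The v1 Endpoints deprecation warnings come from this provisioner's leader-election lock still being Endpoints-based (the kube-system/k8s.io-minikube-hostpath object) and are harmless. Assuming the same context, the lock object can be inspected with:

    kubectl --context no-preload-019660 -n kube-system get endpoints k8s.io-minikube-hostpath -o yaml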
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-019660 -n no-preload-019660
helpers_test.go:269: (dbg) Run: kubectl --context no-preload-019660 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t
helpers_test.go:282: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t: exit status 1 (93.005208ms)
** stderr **
Error from server (NotFound): pods "metrics-server-746fcd58dc-tg8q5" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-6ffb444bf9-4965t" not found
** /stderr **
helpers_test.go:287: kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t: exit status 1
--- FAIL: TestStartStop/group/no-preload/serial/Pause (40.50s)
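One caveat on the post-mortem itself: the NotFound errors from the describe step are not additional breakage. The describe at helpers_test.go:285 runs kubectl describe pod without a namespace flag, so it looks for the two pods in the default namespace, whereas the listing at helpers_test.go:269 used -A and the pods actually live in kube-system and kubernetes-dashboard. A lookup that matches how they were listed would be:

    kubectl --context no-preload-019660 -n kube-system describe pod metrics-server-746fcd58dc-tg8q5
    kubectl --context no-preload-019660 -n kubernetes-dashboard describe pod dashboard-metrics-scraper-6ffb444bf9-4965t

The actual failure remains the Pause step reported in the FAIL line above, not the missing describe output.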