=== RUN TestStartStop/group/default-k8s-diff-port/serial/Pause
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 pause -p default-k8s-diff-port-311562 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Done: out/minikube-linux-amd64 pause -p default-k8s-diff-port-311562 --alsologtostderr -v=1: (1.48059884s)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-311562 -n default-k8s-diff-port-311562
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-311562 -n default-k8s-diff-port-311562: exit status 2 (15.76689873s)
                                                -- stdout --
Stopped
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: post-pause apiserver status = "Stopped"; want = "Paused"
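Note: the pause command itself completed, but the immediate status check above still reported "Stopped" rather than "Paused". A minimal sketch of retrying that check until the paused state propagates (hypothetical helper, not part of the minikube test suite; the binary path, profile name, and expected value are taken from the log above):

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// pollAPIServerStatus re-runs "minikube status --format={{.APIServer}}"
// until it prints the expected state or the deadline expires.
func pollAPIServerStatus(profile, want string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		// "status" exits non-zero for non-Running states (as seen above),
		// so the exit error is ignored and only the printed state is compared.
		out, _ := exec.Command("out/minikube-linux-amd64", "status",
			"--format={{.APIServer}}", "-p", profile, "-n", profile).Output()
		got := strings.TrimSpace(string(out))
		if got == want {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("post-pause apiserver status = %q; want %q", got, want)
		}
		time.Sleep(2 * time.Second)
	}
}

func main() {
	if err := pollAPIServerStatus("default-k8s-diff-port-311562", "Paused", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}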
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-311562 -n default-k8s-diff-port-311562
E1102 13:46:25.333023 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/custom-flannel-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:26.082450 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:26.088969 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:26.100445 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:26.121929 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:26.163697 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:26.245227 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:26.406821 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:26.728573 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:27.370723 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:28.652716 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-311562 -n default-k8s-diff-port-311562: exit status 2 (15.767351435s)
-- stdout --
Stopped
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 unpause -p default-k8s-diff-port-311562 --alsologtostderr -v=1
E1102 13:46:36.141189 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/false-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:36.147719 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/false-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:36.159122 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/false-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:36.180577 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/false-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:36.222217 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/false-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:36.304047 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/false-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:36.336582 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/enable-default-cni-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:36.466116 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/false-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1102 13:46:36.787633 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/false-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-311562 -n default-k8s-diff-port-311562
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-311562 -n default-k8s-diff-port-311562
E1102 13:46:37.429135 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/false-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:247: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-311562 -n default-k8s-diff-port-311562
helpers_test.go:252: <<< TestStartStop/group/default-k8s-diff-port/serial/Pause FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p default-k8s-diff-port-311562 logs -n 25
E1102 13:46:38.710775 13270 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/false-915875/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p default-k8s-diff-port-311562 logs -n 25: (1.596229688s)
helpers_test.go:260: TestStartStop/group/default-k8s-diff-port/serial/Pause logs:
-- stdout --
==> Audit <==
┌─────────┬───────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼───────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ delete │ -p newest-cni-147975 │ newest-cni-147975 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ image │ default-k8s-diff-port-311562 image list --format=json │ default-k8s-diff-port-311562 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ pause │ -p default-k8s-diff-port-311562 --alsologtostderr -v=1 │ default-k8s-diff-port-311562 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ delete │ -p newest-cni-147975 │ newest-cni-147975 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which crictl │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which rsync │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which VBoxService │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which VBoxControl │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which wget │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which socat │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which git │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which podman │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which iptables │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which docker │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which curl │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /data | grep /data │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/minikube | grep /var/lib/minikube │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/boot2docker | grep /var/lib/boot2docker │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/toolbox | grep /var/lib/toolbox │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/cni | grep /var/lib/cni │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/kubelet | grep /var/lib/kubelet │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/docker | grep /var/lib/docker │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh test -f /sys/kernel/btf/vmlinux && echo 'OK' || echo 'NOT FOUND' │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ delete │ -p guest-929077 │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ unpause │ -p default-k8s-diff-port-311562 --alsologtostderr -v=1 │ default-k8s-diff-port-311562 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
└─────────┴───────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/02 13:45:27
Running on machine: ubuntu-20-agent-7
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1102 13:45:27.814942 52806 out.go:360] Setting OutFile to fd 1 ...
I1102 13:45:27.815219 52806 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1102 13:45:27.815230 52806 out.go:374] Setting ErrFile to fd 2...
I1102 13:45:27.815235 52806 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1102 13:45:27.815479 52806 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21808-9383/.minikube/bin
I1102 13:45:27.816019 52806 out.go:368] Setting JSON to false
I1102 13:45:27.816916 52806 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-7","uptime":5278,"bootTime":1762085850,"procs":202,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1043-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1102 13:45:27.817007 52806 start.go:143] virtualization: kvm guest
I1102 13:45:27.819158 52806 out.go:179] * [embed-certs-705938] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1102 13:45:27.820405 52806 notify.go:221] Checking for updates...
I1102 13:45:27.820434 52806 out.go:179] - MINIKUBE_LOCATION=21808
I1102 13:45:27.821972 52806 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1102 13:45:27.823419 52806 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:45:27.824677 52806 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21808-9383/.minikube
I1102 13:45:27.825909 52806 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1102 13:45:27.827139 52806 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1102 13:45:27.828747 52806 config.go:182] Loaded profile config "default-k8s-diff-port-311562": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:27.828850 52806 config.go:182] Loaded profile config "guest-929077": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v0.0.0
I1102 13:45:27.828938 52806 config.go:182] Loaded profile config "newest-cni-147975": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:27.829036 52806 config.go:182] Loaded profile config "no-preload-047294": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:27.829125 52806 driver.go:422] Setting default libvirt URI to qemu:///system
I1102 13:45:27.864969 52806 out.go:179] * Using the kvm2 driver based on user configuration
I1102 13:45:27.866282 52806 start.go:309] selected driver: kvm2
I1102 13:45:27.866299 52806 start.go:930] validating driver "kvm2" against <nil>
I1102 13:45:27.866311 52806 start.go:941] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1102 13:45:27.867320 52806 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1102 13:45:27.867666 52806 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1102 13:45:27.867720 52806 cni.go:84] Creating CNI manager for ""
I1102 13:45:27.867784 52806 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:45:27.867797 52806 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I1102 13:45:27.867857 52806 start.go:353] cluster config:
{Name:embed-certs-705938 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-705938 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1102 13:45:27.867993 52806 iso.go:125] acquiring lock: {Name:mk4c692b2fc885c991be3e19f361e45d770e6035 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1102 13:45:27.870465 52806 out.go:179] * Starting "embed-certs-705938" primary control-plane node in "embed-certs-705938" cluster
I1102 13:45:24.258413 52157 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1102 13:45:24.261942 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:24.262553 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:24.262596 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:24.262847 52157 ssh_runner.go:195] Run: grep 192.168.83.1 host.minikube.internal$ /etc/hosts
I1102 13:45:24.267803 52157 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.83.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:45:24.288892 52157 kubeadm.go:884] updating cluster {Name:default-k8s-diff-port-311562 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-311562 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.253 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1102 13:45:24.289170 52157 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1102 13:45:24.289255 52157 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:24.312158 52157 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1102 13:45:24.312186 52157 docker.go:621] Images already preloaded, skipping extraction
I1102 13:45:24.312243 52157 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:24.336173 52157 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1102 13:45:24.336205 52157 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:45:24.336218 52157 kubeadm.go:935] updating node { 192.168.83.253 8444 v1.34.1 docker true true} ...
I1102 13:45:24.336363 52157 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=default-k8s-diff-port-311562 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.83.253
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-311562 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1102 13:45:24.336444 52157 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1102 13:45:24.403493 52157 cni.go:84] Creating CNI manager for ""
I1102 13:45:24.403547 52157 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:45:24.403566 52157 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1102 13:45:24.403600 52157 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.83.253 APIServerPort:8444 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:default-k8s-diff-port-311562 NodeName:default-k8s-diff-port-311562 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.83.253"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.83.253 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1102 13:45:24.403780 52157 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.83.253
  bindPort: 8444
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "default-k8s-diff-port-311562"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.83.253"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.83.253"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8444
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
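The rendered config above is copied onto the node as /var/tmp/minikube/kubeadm.yaml (see the scp and cp steps below). A minimal sketch for pulling it back off the node for inspection via "minikube ssh" (hypothetical snippet; binary path and profile name taken from this run):

package main

import (
	"fmt"
	"os/exec"
)

// dumpKubeadmConfig prints the kubeadm config that minikube rendered onto the node.
func dumpKubeadmConfig(profile string) error {
	out, err := exec.Command("out/minikube-linux-amd64", "ssh", "-p", profile,
		"--", "sudo", "cat", "/var/tmp/minikube/kubeadm.yaml").CombinedOutput()
	fmt.Print(string(out))
	return err
}

func main() {
	_ = dumpKubeadmConfig("default-k8s-diff-port-311562")
}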
I1102 13:45:24.403864 52157 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1102 13:45:24.419144 52157 binaries.go:44] Found k8s binaries, skipping transfer
I1102 13:45:24.419209 52157 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1102 13:45:24.433296 52157 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (329 bytes)
I1102 13:45:24.456694 52157 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1102 13:45:24.481139 52157 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2235 bytes)
I1102 13:45:24.508847 52157 ssh_runner.go:195] Run: grep 192.168.83.253 control-plane.minikube.internal$ /etc/hosts
I1102 13:45:24.513455 52157 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.83.253	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:45:24.539087 52157 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:24.723252 52157 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:45:24.745506 52157 certs.go:69] Setting up /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562 for IP: 192.168.83.253
I1102 13:45:24.745533 52157 certs.go:195] generating shared ca certs ...
I1102 13:45:24.745553 52157 certs.go:227] acquiring lock for ca certs: {Name:mk8ca472744959dc88f74e7c4ca834685146022e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:24.745749 52157 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key
I1102 13:45:24.745818 52157 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key
I1102 13:45:24.745830 52157 certs.go:257] generating profile certs ...
I1102 13:45:24.745960 52157 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/client.key
I1102 13:45:24.746049 52157 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/apiserver.key.ad150ec8
I1102 13:45:24.746119 52157 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/proxy-client.key
I1102 13:45:24.746278 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:45:24.746379 52157 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:45:24.746397 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:45:24.746438 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:45:24.746473 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:45:24.746506 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:45:24.746564 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:24.747319 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1102 13:45:24.816279 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1102 13:45:24.864897 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1102 13:45:24.902332 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1102 13:45:24.935953 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1440 bytes)
I1102 13:45:24.990314 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1102 13:45:25.027803 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1102 13:45:25.074608 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1102 13:45:25.120662 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:45:25.166354 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:45:25.209580 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:45:25.249272 52157 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1102 13:45:25.279712 52157 ssh_runner.go:195] Run: openssl version
I1102 13:45:25.287027 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:45:25.306535 52157 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:45:25.313675 52157 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:45:25.313807 52157 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:45:25.321843 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:45:25.338104 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:45:25.357307 52157 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:25.363326 52157 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:25.363444 52157 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:25.373914 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:45:25.386652 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:45:25.405642 52157 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:45:25.411303 52157 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:45:25.411402 52157 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:45:25.419049 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:45:25.431308 52157 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1102 13:45:25.438509 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1102 13:45:25.449100 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1102 13:45:25.456907 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1102 13:45:25.467179 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1102 13:45:25.475514 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1102 13:45:25.483363 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
I1102 13:45:25.491312 52157 kubeadm.go:401] StartCluster: {Name:default-k8s-diff-port-311562 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-311562 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.253 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1102 13:45:25.491492 52157 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:45:25.517361 52157 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1102 13:45:25.535944 52157 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1102 13:45:25.535968 52157 kubeadm.go:598] restartPrimaryControlPlane start ...
I1102 13:45:25.536024 52157 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1102 13:45:25.552538 52157 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1102 13:45:25.553314 52157 kubeconfig.go:47] verify endpoint returned: get endpoint: "default-k8s-diff-port-311562" does not appear in /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:45:25.553708 52157 kubeconfig.go:62] /home/jenkins/minikube-integration/21808-9383/kubeconfig needs updating (will repair): [kubeconfig missing "default-k8s-diff-port-311562" cluster setting kubeconfig missing "default-k8s-diff-port-311562" context setting]
I1102 13:45:25.554366 52157 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:25.556177 52157 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1102 13:45:25.573392 52157 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.83.253
I1102 13:45:25.573435 52157 kubeadm.go:1161] stopping kube-system containers ...
I1102 13:45:25.573505 52157 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:45:25.604235 52157 docker.go:484] Stopping containers: [3edfba6f9a3b 70e642b4f37f 1c545a43f579 0bebcbbb6c23 859f8a49dca1 dfd9d91f746b 282b9f80c345 38a817203976 326385a03f87 3db15e0e0d1e 693e1d6c029a 3d076807ce98 9c5c22752c78 4f4d927b9b11 20a067500af0 efae7bca22ce]
I1102 13:45:25.604321 52157 ssh_runner.go:195] Run: docker stop 3edfba6f9a3b 70e642b4f37f 1c545a43f579 0bebcbbb6c23 859f8a49dca1 dfd9d91f746b 282b9f80c345 38a817203976 326385a03f87 3db15e0e0d1e 693e1d6c029a 3d076807ce98 9c5c22752c78 4f4d927b9b11 20a067500af0 efae7bca22ce
I1102 13:45:25.644437 52157 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1102 13:45:25.687720 52157 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1102 13:45:25.701262 52157 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1102 13:45:25.701289 52157 kubeadm.go:158] found existing configuration files:
I1102 13:45:25.701352 52157 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/admin.conf
I1102 13:45:25.712652 52157 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1102 13:45:25.712707 52157 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1102 13:45:25.724832 52157 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/kubelet.conf
I1102 13:45:25.738016 52157 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1102 13:45:25.738088 52157 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1102 13:45:25.750148 52157 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/controller-manager.conf
I1102 13:45:25.761179 52157 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1102 13:45:25.761269 52157 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1102 13:45:25.779488 52157 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/scheduler.conf
I1102 13:45:25.793287 52157 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1102 13:45:25.793368 52157 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1102 13:45:25.810499 52157 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1102 13:45:25.822902 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:25.983948 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:27.057902 52157 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.073916863s)
I1102 13:45:27.057967 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:27.350358 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:27.448927 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:27.557150 52157 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:45:27.557235 52157 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:28.057474 52157 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:28.557655 52157 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:29.058130 52157 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:31.478717 52361 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.61.81:22: connect: no route to host
I1102 13:45:27.871749 52806 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1102 13:45:27.871787 52806 preload.go:198] Found local preload: /home/jenkins/minikube-integration/21808-9383/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4
I1102 13:45:27.871794 52806 cache.go:59] Caching tarball of preloaded images
I1102 13:45:27.871901 52806 preload.go:233] Found /home/jenkins/minikube-integration/21808-9383/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1102 13:45:27.871914 52806 cache.go:62] Finished verifying existence of preloaded tar for v1.34.1 on docker
I1102 13:45:27.871999 52806 profile.go:143] Saving config to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/config.json ...
I1102 13:45:27.872018 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/config.json: {Name:mk922c147409f94b9bb8a612e552b8bcbdc6c60d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:27.872161 52806 start.go:360] acquireMachinesLock for embed-certs-705938: {Name:mkb7e4680c5870b574bd51b6ea3b2b307ff3694b Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1102 13:45:29.102026 52157 api_server.go:72] duration metric: took 1.544885698s to wait for apiserver process to appear ...
I1102 13:45:29.102065 52157 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:45:29.102095 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:29.103198 52157 api_server.go:269] stopped: https://192.168.83.253:8444/healthz: Get "https://192.168.83.253:8444/healthz": dial tcp 192.168.83.253:8444: connect: connection refused
I1102 13:45:29.603036 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:32.333055 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                W1102 13:45:32.333090 52157 api_server.go:103] status: https://192.168.83.253:8444/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                I1102 13:45:32.333114 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:32.388238 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                W1102 13:45:32.388269 52157 api_server.go:103] status: https://192.168.83.253:8444/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                I1102 13:45:32.602829 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:32.625545 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:32.625582 52157 api_server.go:103] status: https://192.168.83.253:8444/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:33.102243 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:33.118492 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:33.118527 52157 api_server.go:103] status: https://192.168.83.253:8444/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:33.602919 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:33.626727 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:33.626754 52157 api_server.go:103] status: https://192.168.83.253:8444/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:34.102634 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:34.108370 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 200:
ok
I1102 13:45:34.116358 52157 api_server.go:141] control plane version: v1.34.1
I1102 13:45:34.116386 52157 api_server.go:131] duration metric: took 5.014312473s to wait for apiserver health ...
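The 403/500/200 progression above is minikube polling the apiserver's /healthz endpoint until every post-start hook reports ok. The same probe can be reproduced by hand against the endpoint in the log; the certificate paths below are assumptions based on the default .minikube layout, not values taken from this run:

# anonymous probe: reproduces the 403 "system:anonymous cannot get path /healthz" response
curl -sk https://192.168.83.253:8444/healthz
# authenticated probe: prints "ok" once healthy, or the per-check [+]/[-] list while hooks are still failing
curl -s --cacert "$HOME/.minikube/ca.crt" \
  --cert "$HOME/.minikube/profiles/default-k8s-diff-port-311562/client.crt" \
  --key "$HOME/.minikube/profiles/default-k8s-diff-port-311562/client.key" \
  https://192.168.83.253:8444/healthz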
I1102 13:45:34.116396 52157 cni.go:84] Creating CNI manager for ""
I1102 13:45:34.116406 52157 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:45:34.117986 52157 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1102 13:45:34.119367 52157 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1102 13:45:34.143868 52157 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
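The 496-byte conflist copied above is not reproduced in the log; it can be read back from the node with the command below, and a representative bridge conflist (the subnet shown is a placeholder, not a value from this run) is sketched after it:

minikube ssh -p default-k8s-diff-port-311562 -- sudo cat /etc/cni/net.d/1-k8s.conflist

{
  "cniVersion": "0.3.1",
  "name": "bridge",
  "plugins": [
    { "type": "bridge", "bridge": "bridge", "isDefaultGateway": true, "ipMasq": true, "hairpinMode": true,
      "ipam": { "type": "host-local", "subnet": "10.244.0.0/16" } },
    { "type": "portmap", "capabilities": { "portMappings": true } }
  ]
}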
I1102 13:45:34.171893 52157 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:45:34.177791 52157 system_pods.go:59] 8 kube-system pods found
I1102 13:45:34.177865 52157 system_pods.go:61] "coredns-66bc5c9577-bnv4n" [111da945-5109-4be5-9c67-f48cdaed8cbe] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:34.177879 52157 system_pods.go:61] "etcd-default-k8s-diff-port-311562" [0d2ea3b5-719d-42ed-b50e-bea33102fbd2] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:45:34.177892 52157 system_pods.go:61] "kube-apiserver-default-k8s-diff-port-311562" [3ab30a10-b48f-4807-b701-4ed47eb1dec1] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:45:34.177905 52157 system_pods.go:61] "kube-controller-manager-default-k8s-diff-port-311562" [7b9e15c3-e059-4867-832e-5d67b1eff8f8] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:45:34.177919 52157 system_pods.go:61] "kube-proxy-5qv84" [11842593-1fe8-476c-a692-ecdecf44fafa] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1102 13:45:34.177927 52157 system_pods.go:61] "kube-scheduler-default-k8s-diff-port-311562" [ad4bb6b8-cae7-4cfc-8f3f-3779226708e6] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:45:34.177946 52157 system_pods.go:61] "metrics-server-746fcd58dc-tcttv" [e9fc9174-d97e-4486-a4da-a405ebd4a7f3] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:45:34.177960 52157 system_pods.go:61] "storage-provisioner" [ce313798-c158-4174-aec9-8d1e48caceea] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1102 13:45:34.177972 52157 system_pods.go:74] duration metric: took 6.048605ms to wait for pod list to return data ...
I1102 13:45:34.177982 52157 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:45:34.181850 52157 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:45:34.181899 52157 node_conditions.go:123] node cpu capacity is 2
I1102 13:45:34.181917 52157 node_conditions.go:105] duration metric: took 3.925526ms to run NodePressure ...
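The same pod and node checks can be repeated from the host using the kubeconfig this run just updated; the context name is assumed to match the profile name, which is minikube's default behaviour:

kubectl --context default-k8s-diff-port-311562 -n kube-system get pods
kubectl --context default-k8s-diff-port-311562 describe node default-k8s-diff-port-311562 | grep -E 'cpu:|ephemeral-storage:'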
I1102 13:45:34.181986 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:34.570876 52157 kubeadm.go:729] waiting for restarted kubelet to initialise ...
I1102 13:45:34.574447 52157 kubeadm.go:744] kubelet initialised
I1102 13:45:34.574472 52157 kubeadm.go:745] duration metric: took 3.566201ms waiting for restarted kubelet to initialise ...
I1102 13:45:34.574488 52157 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1102 13:45:34.592867 52157 ops.go:34] apiserver oom_adj: -16
I1102 13:45:34.592894 52157 kubeadm.go:602] duration metric: took 9.056917288s to restartPrimaryControlPlane
I1102 13:45:34.592906 52157 kubeadm.go:403] duration metric: took 9.101604844s to StartCluster
I1102 13:45:34.592928 52157 settings.go:142] acquiring lock: {Name:mk2d74ff80d6e54b2738086ad41016418abd2f10 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:34.593024 52157 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:45:34.593729 52157 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:34.593994 52157 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.83.253 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1102 13:45:34.594093 52157 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1102 13:45:34.594205 52157 config.go:182] Loaded profile config "default-k8s-diff-port-311562": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:34.594219 52157 addons.go:70] Setting metrics-server=true in profile "default-k8s-diff-port-311562"
I1102 13:45:34.594236 52157 addons.go:239] Setting addon metrics-server=true in "default-k8s-diff-port-311562"
I1102 13:45:34.594244 52157 addons.go:70] Setting default-storageclass=true in profile "default-k8s-diff-port-311562"
W1102 13:45:34.594254 52157 addons.go:248] addon metrics-server should already be in state true
I1102 13:45:34.594206 52157 addons.go:70] Setting storage-provisioner=true in profile "default-k8s-diff-port-311562"
I1102 13:45:34.594270 52157 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "default-k8s-diff-port-311562"
I1102 13:45:34.594286 52157 host.go:66] Checking if "default-k8s-diff-port-311562" exists ...
I1102 13:45:34.594286 52157 cache.go:107] acquiring lock: {Name:mkfde24ce23f92e3eaf637254ed5ac4355c07159 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1102 13:45:34.594296 52157 addons.go:239] Setting addon storage-provisioner=true in "default-k8s-diff-port-311562"
W1102 13:45:34.594326 52157 addons.go:248] addon storage-provisioner should already be in state true
I1102 13:45:34.594260 52157 addons.go:70] Setting dashboard=true in profile "default-k8s-diff-port-311562"
I1102 13:45:34.594370 52157 cache.go:115] /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 exists
I1102 13:45:34.594371 52157 host.go:66] Checking if "default-k8s-diff-port-311562" exists ...
I1102 13:45:34.594373 52157 addons.go:239] Setting addon dashboard=true in "default-k8s-diff-port-311562"
W1102 13:45:34.594385 52157 addons.go:248] addon dashboard should already be in state true
I1102 13:45:34.594382 52157 cache.go:96] cache image "gcr.io/k8s-minikube/gvisor-addon:2" -> "/home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2" took 120.497µs
I1102 13:45:34.594393 52157 cache.go:80] save to tar file gcr.io/k8s-minikube/gvisor-addon:2 -> /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 succeeded
I1102 13:45:34.594410 52157 host.go:66] Checking if "default-k8s-diff-port-311562" exists ...
I1102 13:45:34.594401 52157 cache.go:87] Successfully saved all images to host disk.
I1102 13:45:34.594626 52157 config.go:182] Loaded profile config "default-k8s-diff-port-311562": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:34.598052 52157 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:34.598276 52157 addons.go:239] Setting addon default-storageclass=true in "default-k8s-diff-port-311562"
W1102 13:45:34.598295 52157 addons.go:248] addon default-storageclass should already be in state true
I1102 13:45:34.598320 52157 host.go:66] Checking if "default-k8s-diff-port-311562" exists ...
I1102 13:45:34.599734 52157 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1102 13:45:34.599742 52157 out.go:179] * Verifying Kubernetes components...
I1102 13:45:34.599762 52157 out.go:179] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I1102 13:45:34.599742 52157 out.go:179] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I1102 13:45:34.600285 52157 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1102 13:45:34.600302 52157 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1102 13:45:34.601511 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.601857 52157 addons.go:436] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1102 13:45:34.601895 52157 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1102 13:45:34.601934 52157 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:45:34.601948 52157 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1102 13:45:34.601890 52157 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:34.602246 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:34.602282 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.602739 52157 sshutil.go:53] new ssh client: &{IP:192.168.83.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/default-k8s-diff-port-311562/id_rsa Username:docker}
I1102 13:45:34.603296 52157 out.go:179] - Using image registry.k8s.io/echoserver:1.4
I1102 13:45:34.604120 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.604551 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-ns.yaml
I1102 13:45:34.604571 52157 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I1102 13:45:34.604861 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:34.604901 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.605228 52157 sshutil.go:53] new ssh client: &{IP:192.168.83.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/default-k8s-diff-port-311562/id_rsa Username:docker}
I1102 13:45:34.606525 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.606530 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.607058 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:34.607090 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.607167 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:34.607201 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.607285 52157 sshutil.go:53] new ssh client: &{IP:192.168.83.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/default-k8s-diff-port-311562/id_rsa Username:docker}
I1102 13:45:34.607545 52157 sshutil.go:53] new ssh client: &{IP:192.168.83.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/default-k8s-diff-port-311562/id_rsa Username:docker}
I1102 13:45:34.608392 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.608859 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:34.608896 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.609076 52157 sshutil.go:53] new ssh client: &{IP:192.168.83.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/default-k8s-diff-port-311562/id_rsa Username:docker}
I1102 13:45:34.876033 52157 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:45:34.907535 52157 node_ready.go:35] waiting up to 6m0s for node "default-k8s-diff-port-311562" to be "Ready" ...
I1102 13:45:34.937033 52157 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1102 13:45:34.937057 52157 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:45:34.937065 52157 cache_images.go:264] succeeded pushing to: default-k8s-diff-port-311562
I1102 13:45:34.984071 52157 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1102 13:45:34.987387 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I1102 13:45:34.987406 52157 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I1102 13:45:34.998108 52157 addons.go:436] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1102 13:45:34.998131 52157 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I1102 13:45:35.001421 52157 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:45:35.058884 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I1102 13:45:35.058914 52157 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I1102 13:45:35.065034 52157 addons.go:436] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1102 13:45:35.065068 52157 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1102 13:45:35.086869 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I1102 13:45:35.086900 52157 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I1102 13:45:35.125203 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-dp.yaml
I1102 13:45:35.125231 52157 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
I1102 13:45:35.127236 52157 addons.go:436] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:45:35.127260 52157 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1102 13:45:35.168655 52157 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:45:35.200958 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-role.yaml
I1102 13:45:35.200990 52157 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I1102 13:45:35.253959 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I1102 13:45:35.253989 52157 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I1102 13:45:35.373065 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-sa.yaml
I1102 13:45:35.373093 52157 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I1102 13:45:35.451915 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-secret.yaml
I1102 13:45:35.451951 52157 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I1102 13:45:35.522842 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:45:35.522875 52157 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I1102 13:45:35.583283 52157 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:45:36.793258 52157 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.791801585s)
I1102 13:45:36.793434 52157 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (1.624737745s)
I1102 13:45:36.793466 52157 addons.go:480] Verifying addon metrics-server=true in "default-k8s-diff-port-311562"
W1102 13:45:36.940528 52157 node_ready.go:57] node "default-k8s-diff-port-311562" has "Ready":"False" status (will retry)
I1102 13:45:37.026166 52157 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (1.442836828s)
I1102 13:45:37.027775 52157 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p default-k8s-diff-port-311562 addons enable metrics-server
I1102 13:45:37.029478 52157 out.go:179] * Enabled addons: default-storageclass, storage-provisioner, metrics-server, dashboard
I1102 13:45:34.481026 52361 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.61.81:22: connect: connection refused
I1102 13:45:37.030957 52157 addons.go:515] duration metric: took 2.436866044s for enable addons: enabled=[default-storageclass storage-provisioner metrics-server dashboard]
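A quick manual cross-check of the four addons reported above (the deployment and namespace names are assumed from the pod names in this log and from minikube's stock manifests, not confirmed by this run):

minikube addons list -p default-k8s-diff-port-311562
kubectl --context default-k8s-diff-port-311562 -n kube-system get deploy metrics-server
kubectl --context default-k8s-diff-port-311562 -n kubernetes-dashboard get pods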
I1102 13:45:39.865356 52370 start.go:364] duration metric: took 32.34908828s to acquireMachinesLock for "no-preload-047294"
I1102 13:45:39.865424 52370 start.go:96] Skipping create...Using existing machine configuration
I1102 13:45:39.865433 52370 fix.go:54] fixHost starting:
I1102 13:45:39.867915 52370 fix.go:112] recreateIfNeeded on no-preload-047294: state=Stopped err=<nil>
W1102 13:45:39.867946 52370 fix.go:138] unexpected machine state, will restart: <nil>
I1102 13:45:37.581415 52361 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:45:37.584895 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.585320 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.585354 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.585574 52361 profile.go:143] Saving config to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/config.json ...
I1102 13:45:37.585807 52361 machine.go:94] provisionDockerMachine start ...
I1102 13:45:37.588182 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.588520 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.588546 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.588703 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:37.588927 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:37.588938 52361 main.go:143] libmachine: About to run SSH command:
hostname
I1102 13:45:37.694418 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1102 13:45:37.694464 52361 buildroot.go:166] provisioning hostname "newest-cni-147975"
I1102 13:45:37.697594 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.698064 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.698103 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.698302 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:37.698584 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:37.698601 52361 main.go:143] libmachine: About to run SSH command:
sudo hostname newest-cni-147975 && echo "newest-cni-147975" | sudo tee /etc/hostname
I1102 13:45:37.818073 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: newest-cni-147975
I1102 13:45:37.821034 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.821427 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.821465 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.821608 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:37.821800 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:37.821816 52361 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\snewest-cni-147975' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 newest-cni-147975/g' /etc/hosts;
else
echo '127.0.1.1 newest-cni-147975' | sudo tee -a /etc/hosts;
fi
fi
I1102 13:45:37.929633 52361 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:45:37.929665 52361 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21808-9383/.minikube CaCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21808-9383/.minikube}
I1102 13:45:37.929706 52361 buildroot.go:174] setting up certificates
I1102 13:45:37.929719 52361 provision.go:84] configureAuth start
I1102 13:45:37.932700 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.933090 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.933122 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.935216 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.935541 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.935565 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.935692 52361 provision.go:143] copyHostCerts
I1102 13:45:37.935744 52361 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem, removing ...
I1102 13:45:37.935776 52361 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem
I1102 13:45:37.935850 52361 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem (1082 bytes)
I1102 13:45:37.935958 52361 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem, removing ...
I1102 13:45:37.935969 52361 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem
I1102 13:45:37.936002 52361 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem (1123 bytes)
I1102 13:45:37.936085 52361 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem, removing ...
I1102 13:45:37.936094 52361 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem
I1102 13:45:37.936135 52361 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem (1675 bytes)
I1102 13:45:37.936203 52361 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem org=jenkins.newest-cni-147975 san=[127.0.0.1 192.168.61.81 localhost minikube newest-cni-147975]
I1102 13:45:38.155299 52361 provision.go:177] copyRemoteCerts
I1102 13:45:38.155382 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1102 13:45:38.158216 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.158589 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:38.158618 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.158749 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:38.239867 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1102 13:45:38.270160 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1102 13:45:38.299806 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1102 13:45:38.335353 52361 provision.go:87] duration metric: took 405.605042ms to configureAuth
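The server certificate pushed to /etc/docker above carries the SANs listed at provision.go:117; assuming openssl is available inside the guest, they can be confirmed over the same SSH path:

minikube ssh -p newest-cni-147975 -- "sudo openssl x509 -in /etc/docker/server.pem -noout -text | grep -A1 'Subject Alternative Name'"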
I1102 13:45:38.335388 52361 buildroot.go:189] setting minikube options for container-runtime
I1102 13:45:38.335592 52361 config.go:182] Loaded profile config "newest-cni-147975": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:38.338168 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.338550 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:38.338571 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.338739 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:38.338923 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:38.338933 52361 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1102 13:45:38.438323 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1102 13:45:38.438372 52361 buildroot.go:70] root file system type: tmpfs
I1102 13:45:38.438525 52361 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1102 13:45:38.441568 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.442033 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:38.442065 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.442251 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:38.442490 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:38.442545 52361 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1102 13:45:38.558405 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1102 13:45:38.561299 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.561670 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:38.561693 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.561907 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:38.562125 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:38.562140 52361 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1102 13:45:39.604281 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1102 13:45:39.604317 52361 machine.go:97] duration metric: took 2.018495868s to provisionDockerMachine
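With provisionDockerMachine finished, the freshly installed unit and the daemon state can be verified from the host over the same SSH path:

minikube ssh -p newest-cni-147975 -- "sudo systemctl is-active docker"
minikube ssh -p newest-cni-147975 -- "sudo systemctl cat docker.service"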
I1102 13:45:39.604334 52361 start.go:293] postStartSetup for "newest-cni-147975" (driver="kvm2")
I1102 13:45:39.604373 52361 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1102 13:45:39.604448 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1102 13:45:39.608114 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.608712 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:39.608752 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.608967 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:39.692467 52361 ssh_runner.go:195] Run: cat /etc/os-release
I1102 13:45:39.697397 52361 info.go:137] Remote host: Buildroot 2025.02
I1102 13:45:39.697432 52361 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/addons for local assets ...
I1102 13:45:39.697520 52361 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/files for local assets ...
I1102 13:45:39.697622 52361 filesync.go:149] local asset: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem -> 132702.pem in /etc/ssl/certs
I1102 13:45:39.697739 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1102 13:45:39.710993 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:39.750212 52361 start.go:296] duration metric: took 145.83601ms for postStartSetup
I1102 13:45:39.750248 52361 fix.go:56] duration metric: took 18.890794371s for fixHost
I1102 13:45:39.753417 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.753758 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:39.753780 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.753984 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:39.754184 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:39.754195 52361 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1102 13:45:39.865055 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: 1762091139.840815559
I1102 13:45:39.865088 52361 fix.go:216] guest clock: 1762091139.840815559
I1102 13:45:39.865100 52361 fix.go:229] Guest: 2025-11-02 13:45:39.840815559 +0000 UTC Remote: 2025-11-02 13:45:39.750251978 +0000 UTC m=+32.410617405 (delta=90.563581ms)
I1102 13:45:39.865202 52361 fix.go:200] guest clock delta is within tolerance: 90.563581ms
I1102 13:45:39.865221 52361 start.go:83] releasing machines lock for "newest-cni-147975", held for 19.005792633s
I1102 13:45:39.869172 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.869788 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:39.869817 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.870311 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:45:39.870370 52361 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:45:39.870386 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:45:39.870423 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:45:39.870456 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:45:39.870489 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:45:39.870554 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:39.870644 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:45:39.873860 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.874287 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:39.874308 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.874488 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:39.993846 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:45:40.033517 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:45:40.071252 52361 ssh_runner.go:195] Run: openssl version
I1102 13:45:40.078368 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:45:40.097162 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:45:40.103935 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:45:40.104003 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:45:40.113374 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:45:40.127502 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:45:40.144554 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:40.150102 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:40.150187 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:40.157847 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:45:40.171118 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:45:40.185519 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:45:40.190944 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:45:40.191025 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:45:40.198479 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:45:40.211854 52361 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-certificates >/dev/null 2>&1 && sudo update-ca-certificates || true"
I1102 13:45:40.216528 52361 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-trust >/dev/null 2>&1 && sudo update-ca-trust extract || true"
I1102 13:45:40.222954 52361 ssh_runner.go:195] Run: cat /version.json
I1102 13:45:40.223040 52361 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1102 13:45:40.255994 52361 ssh_runner.go:195] Run: systemctl --version
I1102 13:45:40.262413 52361 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1102 13:45:40.268788 52361 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1102 13:45:40.268851 52361 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1102 13:45:40.290406 52361 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1102 13:45:40.290442 52361 start.go:496] detecting cgroup driver to use...
I1102 13:45:40.290591 52361 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:45:40.314229 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1102 13:45:40.329568 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1102 13:45:40.342632 52361 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1102 13:45:40.342731 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1102 13:45:40.356788 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:45:40.372447 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1102 13:45:40.387923 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:45:40.401408 52361 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1102 13:45:40.417560 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1102 13:45:40.432329 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1102 13:45:40.445927 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1102 13:45:40.458528 52361 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1102 13:45:40.470044 52361 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1102 13:45:40.470120 52361 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1102 13:45:40.486640 52361 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1102 13:45:40.501035 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:40.657081 52361 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1102 13:45:40.702685 52361 start.go:496] detecting cgroup driver to use...
I1102 13:45:40.702800 52361 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1102 13:45:40.727172 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:45:40.748964 52361 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1102 13:45:40.770268 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:45:40.788631 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:45:40.808214 52361 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1102 13:45:40.845782 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:45:40.863375 52361 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:45:40.889520 52361 ssh_runner.go:195] Run: which cri-dockerd
I1102 13:45:40.893858 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1102 13:45:40.906355 52361 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1102 13:45:40.932238 52361 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1102 13:45:41.114491 52361 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1102 13:45:41.329446 52361 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1102 13:45:41.329575 52361 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1102 13:45:41.359524 52361 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1102 13:45:41.381666 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:41.587399 52361 ssh_runner.go:195] Run: sudo systemctl restart docker
I1102 13:45:42.201139 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1102 13:45:42.219085 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1102 13:45:42.237760 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:45:42.256754 52361 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1102 13:45:39.869851 52370 out.go:252] * Restarting existing kvm2 VM for "no-preload-047294" ...
I1102 13:45:39.869901 52370 main.go:143] libmachine: starting domain...
I1102 13:45:39.869917 52370 main.go:143] libmachine: ensuring networks are active...
I1102 13:45:39.871270 52370 main.go:143] libmachine: Ensuring network default is active
I1102 13:45:39.871811 52370 main.go:143] libmachine: Ensuring network mk-no-preload-047294 is active
I1102 13:45:39.872613 52370 main.go:143] libmachine: getting domain XML...
I1102 13:45:39.873800 52370 main.go:143] libmachine: starting domain XML:
<domain type='kvm'>
  <name>no-preload-047294</name>
  <uuid>b0e1e20f-05fd-4dfa-b87d-3577e480609e</uuid>
  <memory unit='KiB'>3145728</memory>
  <currentMemory unit='KiB'>3145728</currentMemory>
  <vcpu placement='static'>2</vcpu>
  <os>
    <type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
    <boot dev='cdrom'/>
    <boot dev='hd'/>
    <bootmenu enable='no'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <cpu mode='host-passthrough' check='none' migratable='on'/>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/bin/qemu-system-x86_64</emulator>
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/boot2docker.iso'/>
      <target dev='hdc' bus='scsi'/>
      <readonly/>
      <address type='drive' controller='0' bus='0' target='0' unit='2'/>
    </disk>
    <disk type='file' device='disk'>
      <driver name='qemu' type='raw' io='threads'/>
      <source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/no-preload-047294.rawdisk'/>
      <target dev='hda' bus='virtio'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
    </disk>
    <controller type='usb' index='0' model='piix3-uhci'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
    </controller>
    <controller type='pci' index='0' model='pci-root'/>
    <controller type='scsi' index='0' model='lsilogic'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </controller>
    <interface type='network'>
      <mac address='52:54:00:26:8d:fe'/>
      <source network='mk-no-preload-047294'/>
      <model type='virtio'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
    </interface>
    <interface type='network'>
      <mac address='52:54:00:f1:04:54'/>
      <source network='default'/>
      <model type='virtio'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
    </interface>
    <serial type='pty'>
      <target type='isa-serial' port='0'>
        <model name='isa-serial'/>
      </target>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
    <input type='mouse' bus='ps2'/>
    <input type='keyboard' bus='ps2'/>
    <audio id='1' type='none'/>
    <memballoon model='virtio'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
    </memballoon>
    <rng model='virtio'>
      <backend model='random'>/dev/random</backend>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
    </rng>
  </devices>
</domain>
I1102 13:45:41.294503 52370 main.go:143] libmachine: waiting for domain to start...
I1102 13:45:41.296065 52370 main.go:143] libmachine: domain is now running
I1102 13:45:41.296088 52370 main.go:143] libmachine: waiting for IP...
I1102 13:45:41.297084 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:41.297908 52370 main.go:143] libmachine: domain no-preload-047294 has current primary IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:41.297923 52370 main.go:143] libmachine: found domain IP: 192.168.72.63
I1102 13:45:41.297928 52370 main.go:143] libmachine: reserving static IP address...
I1102 13:45:41.298428 52370 main.go:143] libmachine: found host DHCP lease matching {name: "no-preload-047294", mac: "52:54:00:26:8d:fe", ip: "192.168.72.63"} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:43:15 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:41.298467 52370 main.go:143] libmachine: skip adding static IP to network mk-no-preload-047294 - found existing host DHCP lease matching {name: "no-preload-047294", mac: "52:54:00:26:8d:fe", ip: "192.168.72.63"}
I1102 13:45:41.298477 52370 main.go:143] libmachine: reserved static IP address 192.168.72.63 for domain no-preload-047294
I1102 13:45:41.298486 52370 main.go:143] libmachine: waiting for SSH...
I1102 13:45:41.298494 52370 main.go:143] libmachine: Getting to WaitForSSH function...
I1102 13:45:41.301093 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:41.301540 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:43:15 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:41.301570 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:41.301922 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:41.302191 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:41.302205 52370 main.go:143] libmachine: About to run SSH command:
exit 0
I1102 13:45:39.410932 52157 node_ready.go:49] node "default-k8s-diff-port-311562" is "Ready"
I1102 13:45:39.410972 52157 node_ready.go:38] duration metric: took 4.503397384s for node "default-k8s-diff-port-311562" to be "Ready" ...
I1102 13:45:39.410995 52157 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:45:39.411072 52157 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:39.432862 52157 api_server.go:72] duration metric: took 4.838818522s to wait for apiserver process to appear ...
I1102 13:45:39.432889 52157 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:45:39.432922 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:39.441678 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 200:
ok
I1102 13:45:39.443725 52157 api_server.go:141] control plane version: v1.34.1
I1102 13:45:39.443750 52157 api_server.go:131] duration metric: took 10.854353ms to wait for apiserver health ...
I1102 13:45:39.443761 52157 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:45:39.450979 52157 system_pods.go:59] 8 kube-system pods found
I1102 13:45:39.451013 52157 system_pods.go:61] "coredns-66bc5c9577-bnv4n" [111da945-5109-4be5-9c67-f48cdaed8cbe] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:39.451024 52157 system_pods.go:61] "etcd-default-k8s-diff-port-311562" [0d2ea3b5-719d-42ed-b50e-bea33102fbd2] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:45:39.451035 52157 system_pods.go:61] "kube-apiserver-default-k8s-diff-port-311562" [3ab30a10-b48f-4807-b701-4ed47eb1dec1] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:45:39.451043 52157 system_pods.go:61] "kube-controller-manager-default-k8s-diff-port-311562" [7b9e15c3-e059-4867-832e-5d67b1eff8f8] Running
I1102 13:45:39.451052 52157 system_pods.go:61] "kube-proxy-5qv84" [11842593-1fe8-476c-a692-ecdecf44fafa] Running
I1102 13:45:39.451065 52157 system_pods.go:61] "kube-scheduler-default-k8s-diff-port-311562" [ad4bb6b8-cae7-4cfc-8f3f-3779226708e6] Running
I1102 13:45:39.451074 52157 system_pods.go:61] "metrics-server-746fcd58dc-tcttv" [e9fc9174-d97e-4486-a4da-a405ebd4a7f3] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:45:39.451083 52157 system_pods.go:61] "storage-provisioner" [ce313798-c158-4174-aec9-8d1e48caceea] Running
I1102 13:45:39.451092 52157 system_pods.go:74] duration metric: took 7.317334ms to wait for pod list to return data ...
I1102 13:45:39.451103 52157 default_sa.go:34] waiting for default service account to be created ...
I1102 13:45:39.456302 52157 default_sa.go:45] found service account: "default"
I1102 13:45:39.456332 52157 default_sa.go:55] duration metric: took 5.220853ms for default service account to be created ...
I1102 13:45:39.456362 52157 system_pods.go:116] waiting for k8s-apps to be running ...
I1102 13:45:39.461632 52157 system_pods.go:86] 8 kube-system pods found
I1102 13:45:39.461674 52157 system_pods.go:89] "coredns-66bc5c9577-bnv4n" [111da945-5109-4be5-9c67-f48cdaed8cbe] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:39.461689 52157 system_pods.go:89] "etcd-default-k8s-diff-port-311562" [0d2ea3b5-719d-42ed-b50e-bea33102fbd2] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:45:39.461701 52157 system_pods.go:89] "kube-apiserver-default-k8s-diff-port-311562" [3ab30a10-b48f-4807-b701-4ed47eb1dec1] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:45:39.461714 52157 system_pods.go:89] "kube-controller-manager-default-k8s-diff-port-311562" [7b9e15c3-e059-4867-832e-5d67b1eff8f8] Running
I1102 13:45:39.461721 52157 system_pods.go:89] "kube-proxy-5qv84" [11842593-1fe8-476c-a692-ecdecf44fafa] Running
I1102 13:45:39.461726 52157 system_pods.go:89] "kube-scheduler-default-k8s-diff-port-311562" [ad4bb6b8-cae7-4cfc-8f3f-3779226708e6] Running
I1102 13:45:39.461734 52157 system_pods.go:89] "metrics-server-746fcd58dc-tcttv" [e9fc9174-d97e-4486-a4da-a405ebd4a7f3] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:45:39.461743 52157 system_pods.go:89] "storage-provisioner" [ce313798-c158-4174-aec9-8d1e48caceea] Running
I1102 13:45:39.461754 52157 system_pods.go:126] duration metric: took 5.382647ms to wait for k8s-apps to be running ...
I1102 13:45:39.461767 52157 system_svc.go:44] waiting for kubelet service to be running ....
I1102 13:45:39.461815 52157 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1102 13:45:39.514748 52157 system_svc.go:56] duration metric: took 52.972619ms WaitForService to wait for kubelet
I1102 13:45:39.514779 52157 kubeadm.go:587] duration metric: took 4.920739305s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1102 13:45:39.514798 52157 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:45:39.521119 52157 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:45:39.521150 52157 node_conditions.go:123] node cpu capacity is 2
I1102 13:45:39.521163 52157 node_conditions.go:105] duration metric: took 6.360208ms to run NodePressure ...
I1102 13:45:39.521177 52157 start.go:242] waiting for startup goroutines ...
I1102 13:45:39.521187 52157 start.go:247] waiting for cluster config update ...
I1102 13:45:39.521201 52157 start.go:256] writing updated cluster config ...
I1102 13:45:39.521537 52157 ssh_runner.go:195] Run: rm -f paused
I1102 13:45:39.528712 52157 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1102 13:45:39.534448 52157 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-bnv4n" in "kube-system" namespace to be "Ready" or be gone ...
W1102 13:45:41.545277 52157 pod_ready.go:104] pod "coredns-66bc5c9577-bnv4n" is not "Ready", error: <nil>
W1102 13:45:44.042820 52157 pod_ready.go:104] pod "coredns-66bc5c9577-bnv4n" is not "Ready", error: <nil>
I1102 13:45:42.440572 52361 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1102 13:45:42.638515 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:42.794631 52361 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1102 13:45:42.837723 52361 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:45:42.859277 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:43.058705 52361 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:45:43.093206 52361 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1102 13:45:43.113952 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:45:43.135431 52361 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:45:43.156617 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:43.330285 52361 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:45:43.351880 52361 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1102 13:45:43.372910 52361 retry.go:31] will retry after 905.68851ms: cri-docker.service not running
I1102 13:45:44.279050 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:45:44.296654 52361 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:45:44.312440 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:44.470640 52361 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:45:44.600633 52361 retry.go:31] will retry after 2.029848811s: cri-docker.service not running
I1102 13:45:46.630725 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:45:46.648951 52361 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1102 13:45:46.649026 52361 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1102 13:45:46.655597 52361 start.go:564] Will wait 60s for crictl version
I1102 13:45:46.655661 52361 ssh_runner.go:195] Run: which crictl
I1102 13:45:46.660129 52361 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1102 13:45:46.705138 52361 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1102 13:45:46.705214 52361 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1102 13:45:46.744452 52361 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1102 13:45:46.780423 52361 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1102 13:45:46.783916 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:46.784424 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:46.784454 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:46.784707 52361 ssh_runner.go:195] Run: grep 192.168.61.1 host.minikube.internal$ /etc/hosts
I1102 13:45:46.791049 52361 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.61.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:45:46.815943 52361 out.go:179] - kubeadm.pod-network-cidr=10.42.0.0/16
I1102 13:45:46.817335 52361 kubeadm.go:884] updating cluster {Name:newest-cni-147975 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-147975 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.61.81 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1102 13:45:46.817504 52361 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1102 13:45:46.817583 52361 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:46.842576 52361 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1102 13:45:46.842612 52361 docker.go:621] Images already preloaded, skipping extraction
I1102 13:45:46.842678 52361 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:46.872207 52361 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1102 13:45:46.872231 52361 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:45:46.872240 52361 kubeadm.go:935] updating node { 192.168.61.81 8443 v1.34.1 docker true true} ...
I1102 13:45:46.872396 52361 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=newest-cni-147975 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.61.81
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:newest-cni-147975 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1102 13:45:46.872497 52361 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1102 13:45:46.939383 52361 cni.go:84] Creating CNI manager for ""
I1102 13:45:46.939435 52361 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:45:46.939451 52361 kubeadm.go:85] Using pod CIDR: 10.42.0.0/16
I1102 13:45:46.939480 52361 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.42.0.0/16 AdvertiseAddress:192.168.61.81 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:newest-cni-147975 NodeName:newest-cni-147975 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.61.81"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.61.81 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1102 13:45:46.939644 52361 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.61.81
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "newest-cni-147975"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.61.81"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.61.81"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.42.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.42.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1102 13:45:46.939715 52361 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1102 13:45:46.952774 52361 binaries.go:44] Found k8s binaries, skipping transfer
I1102 13:45:46.952868 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1102 13:45:46.965101 52361 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I1102 13:45:46.992105 52361 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1102 13:45:47.018046 52361 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2219 bytes)
I1102 13:45:47.044224 52361 ssh_runner.go:195] Run: grep 192.168.61.81 control-plane.minikube.internal$ /etc/hosts
I1102 13:45:47.049806 52361 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.61.81	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:45:47.065913 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:47.211899 52361 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:45:47.246223 52361 certs.go:69] Setting up /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975 for IP: 192.168.61.81
I1102 13:45:47.246248 52361 certs.go:195] generating shared ca certs ...
I1102 13:45:47.246269 52361 certs.go:227] acquiring lock for ca certs: {Name:mk8ca472744959dc88f74e7c4ca834685146022e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:47.246458 52361 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key
I1102 13:45:47.246525 52361 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key
I1102 13:45:47.246536 52361 certs.go:257] generating profile certs ...
I1102 13:45:47.246639 52361 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/client.key
I1102 13:45:47.246728 52361 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/apiserver.key.6dcf010f
I1102 13:45:47.246797 52361 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/proxy-client.key
I1102 13:45:47.246938 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:45:47.246973 52361 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:45:47.246983 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:45:47.247016 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:45:47.247047 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:45:47.247090 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:45:47.247148 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:47.248079 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1102 13:45:47.297167 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1102 13:45:47.330951 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1102 13:45:47.368851 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1102 13:45:44.406555 52370 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.72.63:22: connect: no route to host
W1102 13:45:46.242256 52157 pod_ready.go:104] pod "coredns-66bc5c9577-bnv4n" is not "Ready", error: <nil>
W1102 13:45:48.544971 52157 pod_ready.go:104] pod "coredns-66bc5c9577-bnv4n" is not "Ready", error: <nil>
I1102 13:45:50.044033 52157 pod_ready.go:94] pod "coredns-66bc5c9577-bnv4n" is "Ready"
I1102 13:45:50.044080 52157 pod_ready.go:86] duration metric: took 10.509604165s for pod "coredns-66bc5c9577-bnv4n" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.048899 52157 pod_ready.go:83] waiting for pod "etcd-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.055237 52157 pod_ready.go:94] pod "etcd-default-k8s-diff-port-311562" is "Ready"
I1102 13:45:50.055266 52157 pod_ready.go:86] duration metric: took 6.333452ms for pod "etcd-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.059738 52157 pod_ready.go:83] waiting for pod "kube-apiserver-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.067246 52157 pod_ready.go:94] pod "kube-apiserver-default-k8s-diff-port-311562" is "Ready"
I1102 13:45:50.067275 52157 pod_ready.go:86] duration metric: took 7.504959ms for pod "kube-apiserver-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.070558 52157 pod_ready.go:83] waiting for pod "kube-controller-manager-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.239054 52157 pod_ready.go:94] pod "kube-controller-manager-default-k8s-diff-port-311562" is "Ready"
I1102 13:45:50.239089 52157 pod_ready.go:86] duration metric: took 168.502477ms for pod "kube-controller-manager-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.439772 52157 pod_ready.go:83] waiting for pod "kube-proxy-5qv84" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.839594 52157 pod_ready.go:94] pod "kube-proxy-5qv84" is "Ready"
I1102 13:45:50.839629 52157 pod_ready.go:86] duration metric: took 399.815208ms for pod "kube-proxy-5qv84" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:51.040877 52157 pod_ready.go:83] waiting for pod "kube-scheduler-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:51.439742 52157 pod_ready.go:94] pod "kube-scheduler-default-k8s-diff-port-311562" is "Ready"
I1102 13:45:51.439773 52157 pod_ready.go:86] duration metric: took 398.85952ms for pod "kube-scheduler-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:51.439788 52157 pod_ready.go:40] duration metric: took 11.911040292s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1102 13:45:51.507427 52157 start.go:628] kubectl: 1.34.1, cluster: 1.34.1 (minor skew: 0)
I1102 13:45:51.510696 52157 out.go:179] * Done! kubectl is now configured to use "default-k8s-diff-port-311562" cluster and "default" namespace by default
I1102 13:45:47.406089 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1102 13:45:47.442991 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1102 13:45:47.476535 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1102 13:45:47.518129 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1102 13:45:47.554833 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:45:47.590010 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:45:47.624568 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:45:47.661561 52361 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1102 13:45:47.685315 52361 ssh_runner.go:195] Run: openssl version
I1102 13:45:47.692602 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:45:47.706611 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:47.712410 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:47.712482 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:47.720260 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:45:47.732929 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:45:47.751436 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:45:47.756780 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:45:47.756847 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:45:47.764402 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:45:47.778176 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:45:47.792782 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:45:47.798225 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:45:47.798282 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:45:47.805999 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:45:47.818047 52361 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1102 13:45:47.823613 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1102 13:45:47.831407 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1102 13:45:47.839095 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1102 13:45:47.847396 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1102 13:45:47.855676 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1102 13:45:47.863567 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
I1102 13:45:47.871243 52361 kubeadm.go:401] StartCluster: {Name:newest-cni-147975 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-147975 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.61.81 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1102 13:45:47.871397 52361 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:45:47.892424 52361 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1102 13:45:47.906131 52361 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1102 13:45:47.906166 52361 kubeadm.go:598] restartPrimaryControlPlane start ...
I1102 13:45:47.906234 52361 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1102 13:45:47.919968 52361 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1102 13:45:47.920513 52361 kubeconfig.go:47] verify endpoint returned: get endpoint: "newest-cni-147975" does not appear in /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:45:47.920771 52361 kubeconfig.go:62] /home/jenkins/minikube-integration/21808-9383/kubeconfig needs updating (will repair): [kubeconfig missing "newest-cni-147975" cluster setting kubeconfig missing "newest-cni-147975" context setting]
I1102 13:45:47.921164 52361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:47.922488 52361 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1102 13:45:47.935887 52361 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.61.81
I1102 13:45:47.935923 52361 kubeadm.go:1161] stopping kube-system containers ...
I1102 13:45:47.935981 52361 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:45:47.957367 52361 docker.go:484] Stopping containers: [796077e2e305 dda81dfc8f93 7da452f8624c 0e3279276aad 40296b761124 a375f5d36ad1 25ea9b121d14 5d63adead0dc 561a7dbe6c95 4c13272346bd d81530d0990b a051ad8744d8 004a21d2302e 4604fd97c7bc 4c3b7b51c863 0cfe01cc5020 092464ab860e]
I1102 13:45:47.957457 52361 ssh_runner.go:195] Run: docker stop 796077e2e305 dda81dfc8f93 7da452f8624c 0e3279276aad 40296b761124 a375f5d36ad1 25ea9b121d14 5d63adead0dc 561a7dbe6c95 4c13272346bd d81530d0990b a051ad8744d8 004a21d2302e 4604fd97c7bc 4c3b7b51c863 0cfe01cc5020 092464ab860e
I1102 13:45:47.979846 52361 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1102 13:45:47.999177 52361 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1102 13:45:48.012380 52361 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1102 13:45:48.012405 52361 kubeadm.go:158] found existing configuration files:
I1102 13:45:48.012460 52361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1102 13:45:48.022972 52361 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1102 13:45:48.023027 52361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1102 13:45:48.033987 52361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1102 13:45:48.047093 52361 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1102 13:45:48.047186 52361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1102 13:45:48.058952 52361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1102 13:45:48.069542 52361 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1102 13:45:48.069612 52361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1102 13:45:48.081299 52361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1102 13:45:48.092106 52361 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1102 13:45:48.092176 52361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1102 13:45:48.104552 52361 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1102 13:45:48.116359 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:48.247116 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:49.517955 52361 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.270797804s)
I1102 13:45:49.518018 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:49.815066 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:49.879178 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:49.948523 52361 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:45:49.948619 52361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:50.448833 52361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:50.949043 52361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:51.448826 52361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:51.521892 52361 api_server.go:72] duration metric: took 1.573385649s to wait for apiserver process to appear ...
I1102 13:45:51.521919 52361 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:45:51.521942 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:51.522404 52361 api_server.go:269] stopped: https://192.168.61.81:8443/healthz: Get "https://192.168.61.81:8443/healthz": dial tcp 192.168.61.81:8443: connect: connection refused
I1102 13:45:52.022045 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:50.486647 52370 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.72.63:22: connect: no route to host
I1102 13:45:54.473174 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                W1102 13:45:54.473203 52361 api_server.go:103] status: https://192.168.61.81:8443/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                I1102 13:45:54.473217 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:54.493004 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                W1102 13:45:54.493034 52361 api_server.go:103] status: https://192.168.61.81:8443/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                I1102 13:45:54.522389 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:54.623269 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/priority-and-fairness-config-producer failed: reason withheld
[-]poststarthook/bootstrap-controller failed: reason withheld
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[-]poststarthook/apiservice-registration-controller failed: reason withheld
[-]poststarthook/apiservice-discovery-controller failed: reason withheld
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:54.623299 52361 api_server.go:103] status: https://192.168.61.81:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/priority-and-fairness-config-producer failed: reason withheld
[-]poststarthook/bootstrap-controller failed: reason withheld
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[-]poststarthook/apiservice-registration-controller failed: reason withheld
[-]poststarthook/apiservice-discovery-controller failed: reason withheld
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:55.022896 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:55.031634 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:55.031663 52361 api_server.go:103] status: https://192.168.61.81:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:55.522128 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:55.534685 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:55.534710 52361 api_server.go:103] status: https://192.168.61.81:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:56.022315 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:56.027465 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 200:
ok
I1102 13:45:56.034524 52361 api_server.go:141] control plane version: v1.34.1
I1102 13:45:56.034546 52361 api_server.go:131] duration metric: took 4.512620372s to wait for apiserver health ...
I1102 13:45:56.034555 52361 cni.go:84] Creating CNI manager for ""
I1102 13:45:56.034565 52361 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:45:56.036598 52361 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1102 13:45:56.037929 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1102 13:45:56.054007 52361 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I1102 13:45:56.093299 52361 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:45:56.097859 52361 system_pods.go:59] 9 kube-system pods found
I1102 13:45:56.097909 52361 system_pods.go:61] "coredns-66bc5c9577-9kzzv" [c30e9e61-e9a7-41ae-9d6f-d74a8636db46] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:56.097922 52361 system_pods.go:61] "coredns-66bc5c9577-qgf47" [cf78af7a-9d0c-41de-86b8-cdcb6edb473f] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:56.097932 52361 system_pods.go:61] "etcd-newest-cni-147975" [4252185b-07b4-4a2c-a158-e5f1c642972b] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:45:56.097941 52361 system_pods.go:61] "kube-apiserver-newest-cni-147975" [8a53f33b-bf25-40e9-956d-ea5b022bf74d] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:45:56.097950 52361 system_pods.go:61] "kube-controller-manager-newest-cni-147975" [4acfba8a-ac1a-48ef-9e78-b0c14875bb27] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:45:56.097964 52361 system_pods.go:61] "kube-proxy-9pcbp" [847ce8ad-752e-4f1d-addb-429b2166cf93] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1102 13:45:56.097972 52361 system_pods.go:61] "kube-scheduler-newest-cni-147975" [a85a41fb-e8de-429a-bc86-79301882f478] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:45:56.097988 52361 system_pods.go:61] "metrics-server-746fcd58dc-mx8wv" [13cfe3e9-f58b-4179-8995-b1a4924fd34e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:45:56.097993 52361 system_pods.go:61] "storage-provisioner" [dfc448b3-9eb7-4baf-baa7-e1c638e45984] Running
I1102 13:45:56.098003 52361 system_pods.go:74] duration metric: took 4.680952ms to wait for pod list to return data ...
I1102 13:45:56.098013 52361 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:45:56.101813 52361 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:45:56.101840 52361 node_conditions.go:123] node cpu capacity is 2
I1102 13:45:56.101850 52361 node_conditions.go:105] duration metric: took 3.827836ms to run NodePressure ...
I1102 13:45:56.101901 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:56.384024 52361 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1102 13:45:56.399387 52361 ops.go:34] apiserver oom_adj: -16
I1102 13:45:56.399409 52361 kubeadm.go:602] duration metric: took 8.493235964s to restartPrimaryControlPlane
I1102 13:45:56.399417 52361 kubeadm.go:403] duration metric: took 8.528183618s to StartCluster
I1102 13:45:56.399431   52361 settings.go:142] acquiring lock: {Name:mk2d74ff80d6e54b2738086ad41016418abd2f10 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:56.399513 52361 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:45:56.400275   52361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:56.400526   52361 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.61.81 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1102 13:45:56.400618 52361 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1102 13:45:56.400721 52361 addons.go:70] Setting storage-provisioner=true in profile "newest-cni-147975"
I1102 13:45:56.400740 52361 addons.go:239] Setting addon storage-provisioner=true in "newest-cni-147975"
W1102 13:45:56.400751 52361 addons.go:248] addon storage-provisioner should already be in state true
I1102 13:45:56.400757 52361 addons.go:70] Setting default-storageclass=true in profile "newest-cni-147975"
I1102 13:45:56.400781 52361 host.go:66] Checking if "newest-cni-147975" exists ...
I1102 13:45:56.400791 52361 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "newest-cni-147975"
I1102 13:45:56.400798 52361 addons.go:70] Setting dashboard=true in profile "newest-cni-147975"
I1102 13:45:56.400820 52361 config.go:182] Loaded profile config "newest-cni-147975": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:56.400841 52361 addons.go:239] Setting addon dashboard=true in "newest-cni-147975"
W1102 13:45:56.400851 52361 addons.go:248] addon dashboard should already be in state true
I1102 13:45:56.400884 52361 host.go:66] Checking if "newest-cni-147975" exists ...
I1102 13:45:56.400898   52361 cache.go:107] acquiring lock: {Name:mkfde24ce23f92e3eaf637254ed5ac4355c07159 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1102 13:45:56.400947 52361 addons.go:70] Setting metrics-server=true in profile "newest-cni-147975"
I1102 13:45:56.400974 52361 addons.go:239] Setting addon metrics-server=true in "newest-cni-147975"
W1102 13:45:56.400982 52361 addons.go:248] addon metrics-server should already be in state true
I1102 13:45:56.400987 52361 cache.go:115] /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 exists
I1102 13:45:56.400995 52361 cache.go:96] cache image "gcr.io/k8s-minikube/gvisor-addon:2" -> "/home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2" took 106.166µs
I1102 13:45:56.401003 52361 host.go:66] Checking if "newest-cni-147975" exists ...
I1102 13:45:56.401004 52361 cache.go:80] save to tar file gcr.io/k8s-minikube/gvisor-addon:2 -> /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 succeeded
I1102 13:45:56.401119 52361 cache.go:87] Successfully saved all images to host disk.
I1102 13:45:56.401304 52361 config.go:182] Loaded profile config "newest-cni-147975": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:56.402945 52361 out.go:179] * Verifying Kubernetes components...
I1102 13:45:56.404264 52361 out.go:179] - Using image registry.k8s.io/echoserver:1.4
I1102 13:45:56.404270 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:56.404303 52361 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1102 13:45:56.404535 52361 addons.go:239] Setting addon default-storageclass=true in "newest-cni-147975"
W1102 13:45:56.404554 52361 addons.go:248] addon default-storageclass should already be in state true
I1102 13:45:56.404574 52361 host.go:66] Checking if "newest-cni-147975" exists ...
I1102 13:45:56.404800   52361 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:56.405442 52361 out.go:179] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I1102 13:45:56.405483 52361 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:45:56.405498 52361 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1102 13:45:56.406423 52361 out.go:179] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I1102 13:45:56.406427 52361 addons.go:436] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1102 13:45:56.406524 52361 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1102 13:45:56.407066 52361 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1102 13:45:56.407085 52361 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1102 13:45:56.407450 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-ns.yaml
I1102 13:45:56.407470 52361 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I1102 13:45:56.409277 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.410208   52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:56.410241 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.410279 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.410488   52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:56.410937   52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:56.410999 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.411233   52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:56.411461 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.411604 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.411734 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.411955   52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:56.411992 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.412064   52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:56.412099 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.412162   52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:56.412158   52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:56.412193 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.412404   52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:56.412597   52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:56.688581 52361 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:45:56.714679 52361 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:45:56.714750 52361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:56.744715 52361 api_server.go:72] duration metric: took 344.158891ms to wait for apiserver process to appear ...
I1102 13:45:56.744741 52361 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:45:56.744758 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:56.750446 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 200:
ok
I1102 13:45:56.751676 52361 api_server.go:141] control plane version: v1.34.1
I1102 13:45:56.751699 52361 api_server.go:131] duration metric: took 6.950433ms to wait for apiserver health ...
I1102 13:45:56.751710 52361 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:45:56.759183 52361 system_pods.go:59] 9 kube-system pods found
I1102 13:45:56.759216 52361 system_pods.go:61] "coredns-66bc5c9577-9kzzv" [c30e9e61-e9a7-41ae-9d6f-d74a8636db46] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:56.759229 52361 system_pods.go:61] "coredns-66bc5c9577-qgf47" [cf78af7a-9d0c-41de-86b8-cdcb6edb473f] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:56.759246 52361 system_pods.go:61] "etcd-newest-cni-147975" [4252185b-07b4-4a2c-a158-e5f1c642972b] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:45:56.759253 52361 system_pods.go:61] "kube-apiserver-newest-cni-147975" [8a53f33b-bf25-40e9-956d-ea5b022bf74d] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:45:56.759259 52361 system_pods.go:61] "kube-controller-manager-newest-cni-147975" [4acfba8a-ac1a-48ef-9e78-b0c14875bb27] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:45:56.759265 52361 system_pods.go:61] "kube-proxy-9pcbp" [847ce8ad-752e-4f1d-addb-429b2166cf93] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1102 13:45:56.759270 52361 system_pods.go:61] "kube-scheduler-newest-cni-147975" [a85a41fb-e8de-429a-bc86-79301882f478] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:45:56.759275 52361 system_pods.go:61] "metrics-server-746fcd58dc-mx8wv" [13cfe3e9-f58b-4179-8995-b1a4924fd34e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:45:56.759279 52361 system_pods.go:61] "storage-provisioner" [dfc448b3-9eb7-4baf-baa7-e1c638e45984] Running
I1102 13:45:56.759287 52361 system_pods.go:74] duration metric: took 7.5715ms to wait for pod list to return data ...
I1102 13:45:56.759298 52361 default_sa.go:34] waiting for default service account to be created ...
I1102 13:45:56.763228 52361 default_sa.go:45] found service account: "default"
I1102 13:45:56.763256 52361 default_sa.go:55] duration metric: took 3.951354ms for default service account to be created ...
I1102 13:45:56.763271 52361 kubeadm.go:587] duration metric: took 362.717936ms to wait for: map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true]
I1102 13:45:56.763289 52361 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:45:56.768746 52361 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:45:56.768769 52361 node_conditions.go:123] node cpu capacity is 2
I1102 13:45:56.768782 52361 node_conditions.go:105] duration metric: took 5.486448ms to run NodePressure ...
I1102 13:45:56.768798 52361 start.go:242] waiting for startup goroutines ...
I1102 13:45:56.944891 52361 addons.go:436] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1102 13:45:56.944915 52361 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I1102 13:45:56.969778 52361 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1102 13:45:56.969805 52361 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:45:56.969816 52361 cache_images.go:264] succeeded pushing to: newest-cni-147975
I1102 13:45:56.991460 52361 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1102 13:45:56.993360 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I1102 13:45:56.993381 52361 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I1102 13:45:57.016284 52361 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:45:57.043942 52361 addons.go:436] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1102 13:45:57.043964 52361 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1102 13:45:57.063179 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I1102 13:45:57.063207 52361 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I1102 13:45:57.103218 52361 addons.go:436] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:45:57.103246 52361 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1102 13:45:57.120682 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I1102 13:45:57.120722 52361 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I1102 13:45:57.158831 52361 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:45:57.183928 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-dp.yaml
I1102 13:45:57.183956 52361 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
I1102 13:45:57.245005 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-role.yaml
I1102 13:45:57.245040 52361 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I1102 13:45:57.317172 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I1102 13:45:57.317202 52361 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I1102 13:45:53.488158 52370 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.72.63:22: connect: connection refused
I1102 13:45:56.610217 52370 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:45:56.614168 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.614598   52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:56.614631 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.614943 52370 profile.go:143] Saving config to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/config.json ...
I1102 13:45:56.615239 52370 machine.go:94] provisionDockerMachine start ...
I1102 13:45:56.617745 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.618205   52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:56.618239 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.618488 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:56.618794   52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:56.618812 52370 main.go:143] libmachine: About to run SSH command:
hostname
I1102 13:45:56.737480 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1102 13:45:56.737514 52370 buildroot.go:166] provisioning hostname "no-preload-047294"
I1102 13:45:56.741083 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.741556   52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:56.741581 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.741755 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:56.742047   52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:56.742065 52370 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-047294 && echo "no-preload-047294" | sudo tee /etc/hostname
I1102 13:45:56.885194 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-047294
I1102 13:45:56.888085 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.888599   52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:56.888634 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.888846 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:56.889114   52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:56.889139 52370 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-047294' /etc/hosts; then
	if grep -xq '127.0.1.1\s.*' /etc/hosts; then
		sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-047294/g' /etc/hosts;
	else
		echo '127.0.1.1 no-preload-047294' | sudo tee -a /etc/hosts;
	fi
fi
I1102 13:45:57.022832 52370 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:45:57.022861   52370 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21808-9383/.minikube CaCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21808-9383/.minikube}
I1102 13:45:57.022883 52370 buildroot.go:174] setting up certificates
I1102 13:45:57.022894 52370 provision.go:84] configureAuth start
I1102 13:45:57.026486 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.027035   52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:57.027080 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.029781 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.030259   52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:57.030282 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.030445 52370 provision.go:143] copyHostCerts
I1102 13:45:57.030489 52370 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem, removing ...
I1102 13:45:57.030506 52370 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem
I1102 13:45:57.030569 52370 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem (1082 bytes)
I1102 13:45:57.030696 52370 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem, removing ...
I1102 13:45:57.030705 52370 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem
I1102 13:45:57.030734 52370 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem (1123 bytes)
I1102 13:45:57.030800 52370 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem, removing ...
I1102 13:45:57.030814 52370 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem
I1102 13:45:57.030842 52370 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem (1675 bytes)
I1102 13:45:57.030903 52370 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem org=jenkins.no-preload-047294 san=[127.0.0.1 192.168.72.63 localhost minikube no-preload-047294]
I1102 13:45:57.401279 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-sa.yaml
I1102 13:45:57.401305 52361 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I1102 13:45:57.478491 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-secret.yaml
I1102 13:45:57.478524 52361 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I1102 13:45:57.545072 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:45:57.545096 52361 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I1102 13:45:57.615754 52361 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:45:58.907853 52361 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.891527315s)
I1102 13:45:58.915190 52361 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (1.756319413s)
I1102 13:45:58.915238 52361 addons.go:480] Verifying addon metrics-server=true in "newest-cni-147975"
I1102 13:45:59.192585 52361 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (1.576777475s)
I1102 13:45:59.194323 52361 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p newest-cni-147975 addons enable metrics-server
I1102 13:45:59.196091 52361 out.go:179] * Enabled addons: default-storageclass, storage-provisioner, metrics-server, dashboard
I1102 13:45:59.197461 52361 addons.go:515] duration metric: took 2.796848686s for enable addons: enabled=[default-storageclass storage-provisioner metrics-server dashboard]
I1102 13:45:59.197511 52361 start.go:247] waiting for cluster config update ...
I1102 13:45:59.197531 52361 start.go:256] writing updated cluster config ...
I1102 13:45:59.197880 52361 ssh_runner.go:195] Run: rm -f paused
I1102 13:45:59.254393 52361 start.go:628] kubectl: 1.34.1, cluster: 1.34.1 (minor skew: 0)
I1102 13:45:59.255941 52361 out.go:179] * Done! kubectl is now configured to use "newest-cni-147975" cluster and "default" namespace by default
I1102 13:45:59.477039 52806 start.go:364] duration metric: took 31.604839295s to acquireMachinesLock for "embed-certs-705938"
I1102 13:45:59.477108   52806 start.go:93] Provisioning new machine with config: &{Name:embed-certs-705938 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-705938 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1102 13:45:59.477222 52806 start.go:125] createHost starting for "" (driver="kvm2")
I1102 13:45:57.583058 52370 provision.go:177] copyRemoteCerts
I1102 13:45:57.583132 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1102 13:45:57.586046 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.586503 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:57.586535 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.586686 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:45:57.678038 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1102 13:45:57.713691 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1102 13:45:57.747399 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1102 13:45:57.776834 52370 provision.go:87] duration metric: took 753.922019ms to configureAuth
I1102 13:45:57.776873 52370 buildroot.go:189] setting minikube options for container-runtime
I1102 13:45:57.777104 52370 config.go:182] Loaded profile config "no-preload-047294": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:57.779796 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.780355 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:57.780393 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.780606 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:57.780874 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:57.780889 52370 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1102 13:45:57.901865 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1102 13:45:57.901895 52370 buildroot.go:70] root file system type: tmpfs
I1102 13:45:57.902036 52370 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1102 13:45:57.905419 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.905926 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:57.905963 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.906383 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:57.906673 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:57.906754 52370 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1102 13:45:58.044239 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1102 13:45:58.047301 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:58.047691 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:58.047728 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:58.047920 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:58.048159 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:58.048178 52370 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1102 13:45:59.198840 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1102 13:45:59.198871 52370 machine.go:97] duration metric: took 2.583615843s to provisionDockerMachine
I1102 13:45:59.198887 52370 start.go:293] postStartSetup for "no-preload-047294" (driver="kvm2")
I1102 13:45:59.198899 52370 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1102 13:45:59.198955 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1102 13:45:59.202023 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.202491 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:59.202518 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.202679 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:45:59.295390 52370 ssh_runner.go:195] Run: cat /etc/os-release
I1102 13:45:59.301095 52370 info.go:137] Remote host: Buildroot 2025.02
I1102 13:45:59.301126 52370 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/addons for local assets ...
I1102 13:45:59.301195 52370 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/files for local assets ...
I1102 13:45:59.301317 52370 filesync.go:149] local asset: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem -> 132702.pem in /etc/ssl/certs
I1102 13:45:59.301472 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1102 13:45:59.315721 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:59.353954 52370 start.go:296] duration metric: took 155.052404ms for postStartSetup
I1102 13:45:59.353996 52370 fix.go:56] duration metric: took 19.488564286s for fixHost
I1102 13:45:59.357200 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.357763 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:59.357802 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.358072 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:59.358370 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:59.358393 52370 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1102 13:45:59.476875 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: 1762091159.443263241
I1102 13:45:59.476904 52370 fix.go:216] guest clock: 1762091159.443263241
I1102 13:45:59.476913 52370 fix.go:229] Guest: 2025-11-02 13:45:59.443263241 +0000 UTC Remote: 2025-11-02 13:45:59.35400079 +0000 UTC m=+51.971877286 (delta=89.262451ms)
I1102 13:45:59.476929 52370 fix.go:200] guest clock delta is within tolerance: 89.262451ms
I1102 13:45:59.476935 52370 start.go:83] releasing machines lock for "no-preload-047294", held for 19.611548915s
I1102 13:45:59.480372 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.480900 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:59.480933 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.481279 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:45:59.481332 52370 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:45:59.481362 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:45:59.481404 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:45:59.481437 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:45:59.481472 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:45:59.481531 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:59.481620 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:45:59.484433 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.484830 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:59.484851 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.485012 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:45:59.604225 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:45:59.644148 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:45:59.684494 52370 ssh_runner.go:195] Run: openssl version
I1102 13:45:59.692201 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:45:59.712037 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:59.718376 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:59.718450 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:59.727594 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:45:59.748974 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:45:59.769545 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:45:59.776210 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:45:59.776285 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:45:59.784503 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:45:59.802496 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:45:59.819078 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:45:59.825426 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:45:59.825502 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:45:59.833285 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:45:59.847905 52370 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-certificates >/dev/null 2>&1 && sudo update-ca-certificates || true"
I1102 13:45:59.853749 52370 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-trust >/dev/null 2>&1 && sudo update-ca-trust extract || true"
I1102 13:45:59.858856 52370 ssh_runner.go:195] Run: cat /version.json
I1102 13:45:59.858951 52370 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1102 13:45:59.890927 52370 ssh_runner.go:195] Run: systemctl --version
I1102 13:45:59.897644 52370 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1102 13:45:59.904679 52370 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1102 13:45:59.904750 52370 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1102 13:45:59.935576 52370 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1102 13:45:59.935606 52370 start.go:496] detecting cgroup driver to use...
I1102 13:45:59.935730 52370 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:45:59.964279 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1102 13:45:59.979683 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1102 13:45:59.993474 52370 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1102 13:45:59.993536 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1102 13:46:00.008230 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:46:00.022562 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1102 13:46:00.036229 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:46:00.050572 52370 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1102 13:46:00.067799 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1102 13:46:00.081907 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1102 13:46:00.096308 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1102 13:46:00.114387 52370 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1102 13:46:00.130483 52370 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1102 13:46:00.130563 52370 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1102 13:46:00.150302 52370 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1102 13:46:00.167595 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:00.335252 52370 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1102 13:46:00.384484 52370 start.go:496] detecting cgroup driver to use...
I1102 13:46:00.384583 52370 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1102 13:46:00.402739 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:46:00.421297 52370 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1102 13:46:00.448214 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:46:00.467301 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:46:00.485501 52370 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1102 13:46:00.529031 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:46:00.548789 52370 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:46:00.576038 52370 ssh_runner.go:195] Run: which cri-dockerd
I1102 13:46:00.580513 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1102 13:46:00.593719 52370 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1102 13:46:00.619204 52370 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1102 13:46:00.826949 52370 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1102 13:46:01.010283 52370 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1102 13:46:01.010413 52370 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1102 13:46:01.035123 52370 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1102 13:46:01.052102 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:01.236793 52370 ssh_runner.go:195] Run: sudo systemctl restart docker
I1102 13:46:01.926974 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1102 13:46:01.947750 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1102 13:46:01.966571 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:01.988323 52370 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1102 13:46:02.198426 52370 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1102 13:46:02.389146 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:59.479214 52806 out.go:252] * Creating kvm2 VM (CPUs=2, Memory=3072MB, Disk=20000MB) ...
I1102 13:45:59.479488 52806 start.go:159] libmachine.API.Create for "embed-certs-705938" (driver="kvm2")
I1102 13:45:59.479530 52806 client.go:173] LocalClient.Create starting
I1102 13:45:59.479625 52806 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem
I1102 13:45:59.479679 52806 main.go:143] libmachine: Decoding PEM data...
I1102 13:45:59.479711 52806 main.go:143] libmachine: Parsing certificate...
I1102 13:45:59.479809 52806 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem
I1102 13:45:59.479847 52806 main.go:143] libmachine: Decoding PEM data...
I1102 13:45:59.479870 52806 main.go:143] libmachine: Parsing certificate...
I1102 13:45:59.480302 52806 main.go:143] libmachine: creating domain...
I1102 13:45:59.480322 52806 main.go:143] libmachine: creating network...
I1102 13:45:59.482237 52806 main.go:143] libmachine: found existing default network
I1102 13:45:59.482497 52806 main.go:143] libmachine: <network connections='4'>
<name>default</name>
<uuid>c61344c2-dba2-46dd-a21a-34776d235985</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr0' stp='on' delay='0'/>
<mac address='52:54:00:10:a2:1d'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.122.2' end='192.168.122.254'/>
</dhcp>
</ip>
</network>
I1102 13:45:59.483419 52806 network.go:211] skipping subnet 192.168.39.0/24 that is taken: &{IP:192.168.39.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.39.0/24 Gateway:192.168.39.1 ClientMin:192.168.39.2 ClientMax:192.168.39.254 Broadcast:192.168.39.255 IsPrivate:true Interface:{IfaceName:virbr1 IfaceIPv4:192.168.39.1 IfaceMTU:1500 IfaceMAC:52:54:00:82:31:61} reservation:<nil>}
I1102 13:45:59.484544 52806 network.go:206] using free private subnet 192.168.50.0/24: &{IP:192.168.50.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.50.0/24 Gateway:192.168.50.1 ClientMin:192.168.50.2 ClientMax:192.168.50.254 Broadcast:192.168.50.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001dff020}
I1102 13:45:59.484640 52806 main.go:143] libmachine: defining private network:
<network>
<name>mk-embed-certs-705938</name>
<dns enable='no'/>
<ip address='192.168.50.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.50.2' end='192.168.50.253'/>
</dhcp>
</ip>
</network>
I1102 13:45:59.492966 52806 main.go:143] libmachine: creating private network mk-embed-certs-705938 192.168.50.0/24...
I1102 13:45:59.583327 52806 main.go:143] libmachine: private network mk-embed-certs-705938 192.168.50.0/24 created
I1102 13:45:59.583618 52806 main.go:143] libmachine: <network>
<name>mk-embed-certs-705938</name>
<uuid>04682b65-7e72-41b1-ae2d-736eef505059</uuid>
<bridge name='virbr2' stp='on' delay='0'/>
<mac address='52:54:00:dc:11:a7'/>
<dns enable='no'/>
<ip address='192.168.50.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.50.2' end='192.168.50.253'/>
</dhcp>
</ip>
</network>
I1102 13:45:59.583664 52806 main.go:143] libmachine: setting up store path in /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938 ...
I1102 13:45:59.583715 52806 main.go:143] libmachine: building disk image from file:///home/jenkins/minikube-integration/21808-9383/.minikube/cache/iso/amd64/minikube-v1.37.0-1761658712-21800-amd64.iso
I1102 13:45:59.583730 52806 common.go:152] Making disk image using store path: /home/jenkins/minikube-integration/21808-9383/.minikube
I1102 13:45:59.583815 52806 main.go:143] libmachine: Downloading /home/jenkins/minikube-integration/21808-9383/.minikube/cache/boot2docker.iso from file:///home/jenkins/minikube-integration/21808-9383/.minikube/cache/iso/amd64/minikube-v1.37.0-1761658712-21800-amd64.iso...
I1102 13:45:59.852164 52806 common.go:159] Creating ssh key: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/id_rsa...
I1102 13:45:59.984327 52806 common.go:165] Creating raw disk image: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/embed-certs-705938.rawdisk...
I1102 13:45:59.984383 52806 main.go:143] libmachine: Writing magic tar header
I1102 13:45:59.984407 52806 main.go:143] libmachine: Writing SSH key tar header
I1102 13:45:59.984516 52806 common.go:179] Fixing permissions on /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938 ...
I1102 13:45:59.984609 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938
I1102 13:45:59.984657 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938 (perms=drwx------)
I1102 13:45:59.984690 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21808-9383/.minikube/machines
I1102 13:45:59.984709 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21808-9383/.minikube/machines (perms=drwxr-xr-x)
I1102 13:45:59.984729 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21808-9383/.minikube
I1102 13:45:59.984748 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21808-9383/.minikube (perms=drwxr-xr-x)
I1102 13:45:59.984763 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21808-9383
I1102 13:45:59.984783 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21808-9383 (perms=drwxrwxr-x)
I1102 13:45:59.984800 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration
I1102 13:45:59.984814 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration (perms=drwxrwxr-x)
I1102 13:45:59.984828 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins
I1102 13:45:59.984839 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins (perms=drwxr-xr-x)
I1102 13:45:59.984851 52806 main.go:143] libmachine: checking permissions on dir: /home
I1102 13:45:59.984883 52806 main.go:143] libmachine: skipping /home - not owner
I1102 13:45:59.984894 52806 main.go:143] libmachine: defining domain...
I1102 13:45:59.986152 52806 main.go:143] libmachine: defining domain using XML:
<domain type='kvm'>
<name>embed-certs-705938</name>
<memory unit='MiB'>3072</memory>
<vcpu>2</vcpu>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough'>
</cpu>
<os>
<type>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<devices>
<disk type='file' device='cdrom'>
<source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' cache='default' io='threads' />
<source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/embed-certs-705938.rawdisk'/>
<target dev='hda' bus='virtio'/>
</disk>
<interface type='network'>
<source network='mk-embed-certs-705938'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<source network='default'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
</rng>
</devices>
</domain>
I1102 13:45:59.995830 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:1e:e2:b0 in network default
I1102 13:45:59.996536 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:45:59.996554 52806 main.go:143] libmachine: starting domain...
I1102 13:45:59.996559 52806 main.go:143] libmachine: ensuring networks are active...
I1102 13:45:59.997462 52806 main.go:143] libmachine: Ensuring network default is active
I1102 13:45:59.997971 52806 main.go:143] libmachine: Ensuring network mk-embed-certs-705938 is active
I1102 13:45:59.998947 52806 main.go:143] libmachine: getting domain XML...
I1102 13:46:00.000542 52806 main.go:143] libmachine: starting domain XML:
<domain type='kvm'>
<name>embed-certs-705938</name>
<uuid>32fc3864-8fea-4d1b-850e-64e3e9ecc065</uuid>
<memory unit='KiB'>3145728</memory>
<currentMemory unit='KiB'>3145728</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='2'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' io='threads'/>
<source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/embed-certs-705938.rawdisk'/>
<target dev='hda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</controller>
<interface type='network'>
<mac address='52:54:00:54:db:56'/>
<source network='mk-embed-certs-705938'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:1e:e2:b0'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<audio id='1' type='none'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</rng>
</devices>
</domain>
I1102 13:46:01.577152 52806 main.go:143] libmachine: waiting for domain to start...
I1102 13:46:01.579225 52806 main.go:143] libmachine: domain is now running
I1102 13:46:01.579247 52806 main.go:143] libmachine: waiting for IP...
I1102 13:46:01.580381 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:01.581278 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:01.581301 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:01.581911 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:01.581961 52806 retry.go:31] will retry after 283.516454ms: waiting for domain to come up
I1102 13:46:01.867902 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:01.868662 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:01.868680 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:01.869097 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:01.869146 52806 retry.go:31] will retry after 322.823728ms: waiting for domain to come up
I1102 13:46:02.193585 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:02.194327 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:02.194359 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:02.194870 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:02.194913 52806 retry.go:31] will retry after 355.185879ms: waiting for domain to come up
I1102 13:46:02.551677 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:02.681737 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:02.681759 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:02.682464 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:02.682510 52806 retry.go:31] will retry after 460.738696ms: waiting for domain to come up
I1102 13:46:02.565781 52370 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1102 13:46:02.617258 52370 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:46:02.637998 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:02.806275 52370 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:46:02.834599 52370 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1102 13:46:02.854685 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:02.878203 52370 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:46:02.897523 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:03.087812 52370 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:46:03.114411 52370 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1102 13:46:03.130670 52370 retry.go:31] will retry after 1.304462067s: cri-docker.service not running
I1102 13:46:04.435472 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:04.456844 52370 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:46:04.472965 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:04.664589 52370 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:46:04.825530 52370 retry.go:31] will retry after 1.319662502s: cri-docker.service not running
I1102 13:46:06.145463 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:06.164945 52370 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1102 13:46:06.165021 52370 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1102 13:46:06.171435 52370 start.go:564] Will wait 60s for crictl version
I1102 13:46:06.171507 52370 ssh_runner.go:195] Run: which crictl
I1102 13:46:06.175734 52370 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1102 13:46:06.219786 52370 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1102 13:46:06.219864 52370 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1102 13:46:06.249106 52370 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1102 13:46:06.350551 52370 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1102 13:46:06.353328 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:06.353762 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:06.353789 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:06.354004 52370 ssh_runner.go:195] Run: grep 192.168.72.1 host.minikube.internal$ /etc/hosts
I1102 13:46:06.359460 52370 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.72.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:46:06.375911 52370 kubeadm.go:884] updating cluster {Name:no-preload-047294 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-047294 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.63 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1102 13:46:06.376058 52370 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1102 13:46:06.376096 52370 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:46:06.398237 52370 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1102 13:46:06.398262 52370 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:46:06.398271 52370 kubeadm.go:935] updating node { 192.168.72.63 8443 v1.34.1 docker true true} ...
I1102 13:46:06.398392 52370 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-047294 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.72.63
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-047294 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1102 13:46:06.398467 52370 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1102 13:46:06.459190 52370 cni.go:84] Creating CNI manager for ""
I1102 13:46:06.459232 52370 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:46:06.459248 52370 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1102 13:46:06.459274 52370 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.72.63 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-047294 NodeName:no-preload-047294 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.72.63"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.72.63 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1102 13:46:06.459465 52370 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.72.63
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "no-preload-047294"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.72.63"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.72.63"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1102 13:46:06.459538 52370 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1102 13:46:06.472592 52370 binaries.go:44] Found k8s binaries, skipping transfer
I1102 13:46:06.472665 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1102 13:46:06.485059 52370 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I1102 13:46:06.505950 52370 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1102 13:46:06.526204 52370 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2221 bytes)
I1102 13:46:06.548166 52370 ssh_runner.go:195] Run: grep 192.168.72.63 control-plane.minikube.internal$ /etc/hosts
I1102 13:46:06.553556 52370 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.72.63	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:46:06.574072 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:06.740520 52370 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:46:06.778762 52370 certs.go:69] Setting up /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294 for IP: 192.168.72.63
I1102 13:46:06.778787 52370 certs.go:195] generating shared ca certs ...
I1102 13:46:06.778802 52370 certs.go:227] acquiring lock for ca certs: {Name:mk8ca472744959dc88f74e7c4ca834685146022e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:06.778979 52370 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key
I1102 13:46:06.779039 52370 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key
I1102 13:46:06.779054 52370 certs.go:257] generating profile certs ...
I1102 13:46:06.779158 52370 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/client.key
I1102 13:46:06.779228 52370 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/apiserver.key.8b0bc00d
I1102 13:46:06.779288 52370 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/proxy-client.key
I1102 13:46:06.779447 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:46:06.779492 52370 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:46:06.779510 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:46:06.779548 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:46:06.779581 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:46:06.779613 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:46:06.779673 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:46:06.780429 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1102 13:46:06.826941 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1102 13:46:06.872412 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1102 13:46:06.919682 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1102 13:46:06.978960 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1102 13:46:07.012810 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1102 13:46:07.048070 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1102 13:46:07.079490 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1102 13:46:07.116863 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:46:07.150279 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:46:07.192312 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:46:07.229893 52370 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1102 13:46:07.250158 52370 ssh_runner.go:195] Run: openssl version
I1102 13:46:07.256701 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:46:07.270321 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:07.275945 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:07.275998 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:07.284035 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:46:07.296858 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:46:07.310533 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:46:07.317211 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:46:07.317285 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:46:07.324943 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:46:07.336779 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:46:07.350857 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:46:07.357908 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:46:07.357969 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:46:07.368355 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
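The hash-and-symlink sequence above is the standard OpenSSL CA directory layout: each trusted PEM gets a symlink named after its subject hash so the library can find it by hash lookup. A minimal sketch of the same step, using the minikubeCA file from the log (the hash value is whatever openssl prints, b5213941 in this run):

    HASH=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem)
    sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${HASH}.0"   # e.g. b5213941.0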
I1102 13:46:07.381794 52370 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1102 13:46:07.387640 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1102 13:46:07.395907 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1102 13:46:07.403326 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1102 13:46:07.412467 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1102 13:46:07.422486 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1102 13:46:07.430295 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
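The -checkend 86400 runs above ask whether each certificate remains valid for at least the next 86400 seconds (24 hours); openssl exits 0 if it does and non-zero if it would expire inside the window. For example:

    openssl x509 -noout -in /var/lib/minikube/certs/apiserver.crt -checkend 86400 \
      && echo "valid for at least 24h" || echo "expires within 24h"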
I1102 13:46:07.439746 52370 kubeadm.go:401] StartCluster: {Name:no-preload-047294 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-047294 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.63 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1102 13:46:07.439918 52370 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:46:03.145374 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:03.146146 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:03.146162 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:03.146719 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:03.146754 52806 retry.go:31] will retry after 704.147192ms: waiting for domain to come up
I1102 13:46:03.852816 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:03.853888 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:03.853923 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:03.854438 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:03.854487 52806 retry.go:31] will retry after 839.707232ms: waiting for domain to come up
I1102 13:46:04.695645 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:04.696611 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:04.696632 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:04.697109 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:04.697146 52806 retry.go:31] will retry after 821.241975ms: waiting for domain to come up
I1102 13:46:05.520124 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:05.520894 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:05.520916 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:05.521369 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:05.521406 52806 retry.go:31] will retry after 1.267201306s: waiting for domain to come up
I1102 13:46:06.790524 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:06.791257 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:06.791276 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:06.791681 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:06.791721 52806 retry.go:31] will retry after 1.300732149s: waiting for domain to come up
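The retry loop above is libmachine polling libvirt for the guest's IP, first from the network's DHCP lease table and then falling back to ARP. The same information can be checked by hand (network name and MAC taken from the log; qemu:///system is the URI this driver uses):

    virsh --connect qemu:///system net-dhcp-leases mk-embed-certs-705938
    ip neigh show | grep -i 52:54:00:54:db:56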
I1102 13:46:07.468395 52370 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1102 13:46:07.483402 52370 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1102 13:46:07.483425 52370 kubeadm.go:598] restartPrimaryControlPlane start ...
I1102 13:46:07.483484 52370 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1102 13:46:07.497032 52370 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1102 13:46:07.497623 52370 kubeconfig.go:47] verify endpoint returned: get endpoint: "no-preload-047294" does not appear in /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:46:07.497875 52370 kubeconfig.go:62] /home/jenkins/minikube-integration/21808-9383/kubeconfig needs updating (will repair): [kubeconfig missing "no-preload-047294" cluster setting kubeconfig missing "no-preload-047294" context setting]
I1102 13:46:07.498285 52370 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:07.499579 52370 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1102 13:46:07.511419 52370 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.72.63
I1102 13:46:07.511456 52370 kubeadm.go:1161] stopping kube-system containers ...
I1102 13:46:07.511512 52370 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:46:07.533372 52370 docker.go:484] Stopping containers: [53aacf7a8c25 e2e83452259d caea875f38d2 3d285a4769ed 5c74e9de1020 7f0ca45f2366 47f3841a16f1 9ec2a9113b2f 71dc31d7068c 6b5f5b0d4dda 0d441b292a67 92c90b4f3e13 dc5e08e88d0a ae18505f9975 d663f497e9c9 d071178686a8 542226c154fb]
I1102 13:46:07.533460 52370 ssh_runner.go:195] Run: docker stop 53aacf7a8c25 e2e83452259d caea875f38d2 3d285a4769ed 5c74e9de1020 7f0ca45f2366 47f3841a16f1 9ec2a9113b2f 71dc31d7068c 6b5f5b0d4dda 0d441b292a67 92c90b4f3e13 dc5e08e88d0a ae18505f9975 d663f497e9c9 d071178686a8 542226c154fb
I1102 13:46:07.555045 52370 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1102 13:46:07.575939 52370 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1102 13:46:07.588797 52370 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1102 13:46:07.588839 52370 kubeadm.go:158] found existing configuration files:
I1102 13:46:07.588903 52370 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1102 13:46:07.600185 52370 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1102 13:46:07.600253 52370 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1102 13:46:07.612049 52370 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1102 13:46:07.623377 52370 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1102 13:46:07.623439 52370 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1102 13:46:07.634967 52370 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1102 13:46:07.645964 52370 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1102 13:46:07.646028 52370 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1102 13:46:07.658165 52370 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1102 13:46:07.668751 52370 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1102 13:46:07.668819 52370 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
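The grep/rm pairs above are minikube's stale-kubeconfig cleanup: any /etc/kubernetes/*.conf that does not reference the expected control-plane endpoint is removed so kubeadm can regenerate it. Condensed into an equivalent sketch:

    EP=https://control-plane.minikube.internal:8443
    for f in admin kubelet controller-manager scheduler; do
      sudo grep -q "$EP" /etc/kubernetes/$f.conf || sudo rm -f /etc/kubernetes/$f.conf
    done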
I1102 13:46:07.680242 52370 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1102 13:46:07.691767 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:07.839761 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:08.943799 52370 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.103994694s)
I1102 13:46:08.943882 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:09.179272 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:09.267190 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
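Because existing configuration was found, the restart replays only selected kubeadm init phases (certs, kubeconfig, kubelet-start, control-plane, etcd) instead of a full kubeadm init. The sequence above amounts to roughly:

    for phase in "certs all" "kubeconfig all" "kubelet-start" "control-plane all" "etcd local"; do
      sudo /bin/bash -c "env PATH=/var/lib/minikube/binaries/v1.34.1:\$PATH kubeadm init phase $phase --config /var/tmp/minikube/kubeadm.yaml"
    done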
I1102 13:46:09.336193 52370 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:46:09.336277 52370 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:46:09.836627 52370 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:46:10.337333 52370 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:46:10.837392 52370 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:46:10.911635 52370 api_server.go:72] duration metric: took 1.575444882s to wait for apiserver process to appear ...
I1102 13:46:10.911672 52370 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:46:10.911697 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:10.912267 52370 api_server.go:269] stopped: https://192.168.72.63:8443/healthz: Get "https://192.168.72.63:8443/healthz": dial tcp 192.168.72.63:8443: connect: connection refused
I1102 13:46:11.412006 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:08.093977 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:08.094746 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:08.094767 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:08.095264 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:08.095297 52806 retry.go:31] will retry after 1.468485697s: waiting for domain to come up
I1102 13:46:09.565558 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:09.566316 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:09.566333 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:09.566791 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:09.566827 52806 retry.go:31] will retry after 1.801199922s: waiting for domain to come up
I1102 13:46:11.370817 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:11.371787 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:11.371806 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:11.372220 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:11.372259 52806 retry.go:31] will retry after 2.844673s: waiting for domain to come up
I1102 13:46:13.451636 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W1102 13:46:13.451675 52370 api_server.go:103] status: https://192.168.72.63:8443/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I1102 13:46:13.451695 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:13.549566 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 500:
[+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[-]poststarthook/start-apiextensions-controllers failed: reason withheld
[-]poststarthook/crd-informer-synced failed: reason withheld
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[-]poststarthook/start-service-ip-repair-controllers failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/priority-and-fairness-config-producer failed: reason withheld
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/start-kubernetes-service-cidr-controller failed: reason withheld
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[-]poststarthook/apiservice-registration-controller failed: reason withheld
[-]poststarthook/apiservice-discovery-controller failed: reason withheld
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:46:13.549608 52370 api_server.go:103] status: https://192.168.72.63:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[-]poststarthook/start-apiextensions-controllers failed: reason withheld
[-]poststarthook/crd-informer-synced failed: reason withheld
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[-]poststarthook/start-service-ip-repair-controllers failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/priority-and-fairness-config-producer failed: reason withheld
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/start-kubernetes-service-cidr-controller failed: reason withheld
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[-]poststarthook/apiservice-registration-controller failed: reason withheld
[-]poststarthook/apiservice-discovery-controller failed: reason withheld
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:46:13.912124 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:13.918453 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:46:13.918480 52370 api_server.go:103] status: https://192.168.72.63:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:46:14.412076 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:14.418219 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:46:14.418244 52370 api_server.go:103] status: https://192.168.72.63:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:46:14.911855 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:14.922266 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 200:
ok
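The health poll above progresses from 403 (the anonymous probe is rejected, most likely because the RBAC bootstrap roles that allow unauthenticated /healthz have not been created yet), through 500 with per-check detail, to 200 once every check and post-start hook reports ok. The same endpoint can be queried by hand; the ?verbose parameter prints the per-check breakdown seen earlier:

    curl -k "https://192.168.72.63:8443/healthz?verbose"   # -k since the cluster CA is not in the host trust store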
I1102 13:46:14.949261 52370 api_server.go:141] control plane version: v1.34.1
I1102 13:46:14.949299 52370 api_server.go:131] duration metric: took 4.037618193s to wait for apiserver health ...
I1102 13:46:14.949311 52370 cni.go:84] Creating CNI manager for ""
I1102 13:46:14.949326 52370 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:46:14.950858 52370 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1102 13:46:14.952463 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1102 13:46:14.999278 52370 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I1102 13:46:15.054778 52370 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:46:15.064565 52370 system_pods.go:59] 8 kube-system pods found
I1102 13:46:15.064609 52370 system_pods.go:61] "coredns-66bc5c9577-th5tq" [65f5112e-1f3c-4f25-b91a-aa016db03acd] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:46:15.064619 52370 system_pods.go:61] "etcd-no-preload-047294" [150d4c86-f602-4a05-a8d3-6f54c6402abc] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:46:15.064627 52370 system_pods.go:61] "kube-apiserver-no-preload-047294" [6d35fe6a-5edf-4a16-a84f-fd8f527f48fe] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:46:15.064633 52370 system_pods.go:61] "kube-controller-manager-no-preload-047294" [888bc1fb-1c42-44a9-aa46-ba3e9ee49ee4] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:46:15.064639 52370 system_pods.go:61] "kube-proxy-nw5rx" [5dc5f78c-c165-402a-b834-f2f64e5ac4e2] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1102 13:46:15.064644 52370 system_pods.go:61] "kube-scheduler-no-preload-047294" [af7ccc77-6a6b-4960-b6e4-63c69def3029] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:46:15.064652 52370 system_pods.go:61] "metrics-server-746fcd58dc-6pkxd" [65a57acb-e4e7-4fec-b299-0ae0667ed73a] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:46:15.064657 52370 system_pods.go:61] "storage-provisioner" [d6458b18-8300-444e-9e82-75cb7ba64d82] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1102 13:46:15.064663 52370 system_pods.go:74] duration metric: took 9.863658ms to wait for pod list to return data ...
I1102 13:46:15.064676 52370 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:46:15.070973 52370 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:46:15.070998 52370 node_conditions.go:123] node cpu capacity is 2
I1102 13:46:15.071007 52370 node_conditions.go:105] duration metric: took 6.327227ms to run NodePressure ...
I1102 13:46:15.071052 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:15.456486 52370 kubeadm.go:729] waiting for restarted kubelet to initialise ...
I1102 13:46:15.468525 52370 kubeadm.go:744] kubelet initialised
I1102 13:46:15.468550 52370 kubeadm.go:745] duration metric: took 12.036942ms waiting for restarted kubelet to initialise ...
I1102 13:46:15.468569 52370 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1102 13:46:15.486589 52370 ops.go:34] apiserver oom_adj: -16
I1102 13:46:15.486615 52370 kubeadm.go:602] duration metric: took 8.003182012s to restartPrimaryControlPlane
I1102 13:46:15.486627 52370 kubeadm.go:403] duration metric: took 8.04689358s to StartCluster
I1102 13:46:15.486646 52370 settings.go:142] acquiring lock: {Name:mk2d74ff80d6e54b2738086ad41016418abd2f10 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:15.486716 52370 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:46:15.487838 52370 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:15.488115 52370 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.72.63 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1102 13:46:15.488187 52370 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1102 13:46:15.488294 52370 addons.go:70] Setting storage-provisioner=true in profile "no-preload-047294"
I1102 13:46:15.488315 52370 addons.go:239] Setting addon storage-provisioner=true in "no-preload-047294"
W1102 13:46:15.488327 52370 addons.go:248] addon storage-provisioner should already be in state true
I1102 13:46:15.488333 52370 addons.go:70] Setting default-storageclass=true in profile "no-preload-047294"
I1102 13:46:15.488362 52370 config.go:182] Loaded profile config "no-preload-047294": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:46:15.488369 52370 addons.go:70] Setting metrics-server=true in profile "no-preload-047294"
I1102 13:46:15.488375 52370 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-047294"
I1102 13:46:15.488381 52370 addons.go:239] Setting addon metrics-server=true in "no-preload-047294"
W1102 13:46:15.488389 52370 addons.go:248] addon metrics-server should already be in state true
I1102 13:46:15.488404 52370 host.go:66] Checking if "no-preload-047294" exists ...
I1102 13:46:15.488430 52370 cache.go:107] acquiring lock: {Name:mkfde24ce23f92e3eaf637254ed5ac4355c07159 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1102 13:46:15.488498 52370 cache.go:115] /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 exists
I1102 13:46:15.488514 52370 cache.go:96] cache image "gcr.io/k8s-minikube/gvisor-addon:2" -> "/home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2" took 90.577µs
I1102 13:46:15.488523 52370 cache.go:80] save to tar file gcr.io/k8s-minikube/gvisor-addon:2 -> /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 succeeded
I1102 13:46:15.488531 52370 cache.go:87] Successfully saved all images to host disk.
I1102 13:46:15.488517 52370 addons.go:70] Setting dashboard=true in profile "no-preload-047294"
I1102 13:46:15.488618 52370 addons.go:239] Setting addon dashboard=true in "no-preload-047294"
W1102 13:46:15.488641 52370 addons.go:248] addon dashboard should already be in state true
I1102 13:46:15.488695 52370 config.go:182] Loaded profile config "no-preload-047294": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:46:15.488723 52370 host.go:66] Checking if "no-preload-047294" exists ...
I1102 13:46:15.488364 52370 host.go:66] Checking if "no-preload-047294" exists ...
I1102 13:46:15.490928 52370 out.go:179] * Verifying Kubernetes components...
I1102 13:46:15.491588 52370 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:46:15.492641 52370 addons.go:239] Setting addon default-storageclass=true in "no-preload-047294"
W1102 13:46:15.492661 52370 addons.go:248] addon default-storageclass should already be in state true
I1102 13:46:15.492683 52370 host.go:66] Checking if "no-preload-047294" exists ...
I1102 13:46:15.493697 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:15.494462 52370 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1102 13:46:15.494479 52370 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1102 13:46:15.494792 52370 out.go:179] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I1102 13:46:15.494801 52370 out.go:179] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I1102 13:46:15.494811 52370 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1102 13:46:15.495005 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.495472 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:15.495505 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.495662 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:46:15.496015 52370 addons.go:436] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1102 13:46:15.496033 52370 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1102 13:46:15.496037 52370 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:46:15.496051 52370 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1102 13:46:15.497316 52370 out.go:179] - Using image registry.k8s.io/echoserver:1.4
I1102 13:46:15.497671 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.498541 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-ns.yaml
I1102 13:46:15.498557 52370 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I1102 13:46:15.498589 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:15.498622 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.498859 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:46:15.499924 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.499938 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.500698 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:15.500729 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.500777 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:15.500812 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.500904 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:46:15.501148 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:46:15.501880 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.502298 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:15.502323 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.502486 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:46:15.819044 52370 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:46:15.845525 52370 node_ready.go:35] waiting up to 6m0s for node "no-preload-047294" to be "Ready" ...
I1102 13:46:15.915566 52370 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1102 13:46:15.915591 52370 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:46:15.915598 52370 cache_images.go:264] succeeded pushing to: no-preload-047294
I1102 13:46:15.944858 52370 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1102 13:46:16.021389 52370 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:46:16.052080 52370 addons.go:436] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1102 13:46:16.052111 52370 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I1102 13:46:16.090741 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I1102 13:46:16.090772 52370 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I1102 13:46:16.196294 52370 addons.go:436] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1102 13:46:16.196322 52370 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1102 13:46:16.224986 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I1102 13:46:16.225016 52370 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I1102 13:46:16.286042 52370 addons.go:436] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:46:16.286065 52370 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1102 13:46:16.307552 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I1102 13:46:16.307586 52370 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I1102 13:46:16.366336 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-dp.yaml
I1102 13:46:16.366388 52370 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
I1102 13:46:16.379015 52370 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:46:16.471900 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-role.yaml
I1102 13:46:16.471927 52370 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I1102 13:46:16.569682 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I1102 13:46:16.569711 52370 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I1102 13:46:16.657567 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-sa.yaml
I1102 13:46:16.657596 52370 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I1102 13:46:16.756983 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-secret.yaml
I1102 13:46:16.757028 52370 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I1102 13:46:16.809899 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:46:16.809928 52370 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I1102 13:46:16.871393 52370 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:46:14.218884 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:14.219557 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:14.219571 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:14.219887 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:14.219923 52806 retry.go:31] will retry after 2.93345401s: waiting for domain to come up
I1102 13:46:17.155807 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.156857 52806 main.go:143] libmachine: domain embed-certs-705938 has current primary IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.156885 52806 main.go:143] libmachine: found domain IP: 192.168.50.135
I1102 13:46:17.156899 52806 main.go:143] libmachine: reserving static IP address...
I1102 13:46:17.157383 52806 main.go:143] libmachine: unable to find host DHCP lease matching {name: "embed-certs-705938", mac: "52:54:00:54:db:56", ip: "192.168.50.135"} in network mk-embed-certs-705938
I1102 13:46:17.415133 52806 main.go:143] libmachine: reserved static IP address 192.168.50.135 for domain embed-certs-705938
I1102 13:46:17.415181 52806 main.go:143] libmachine: waiting for SSH...
I1102 13:46:17.415189 52806 main.go:143] libmachine: Getting to WaitForSSH function...
I1102 13:46:17.418689 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.419255 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:minikube Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.419298 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.419531 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:17.419883 52806 main.go:143] &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:17.419903 52806 main.go:143] libmachine: About to run SSH command:
exit 0
I1102 13:46:17.534488 52806 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:46:17.534961 52806 main.go:143] libmachine: domain creation complete
I1102 13:46:17.536764 52806 machine.go:94] provisionDockerMachine start ...
I1102 13:46:17.539495 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.539992 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.540028 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.540252 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:17.540560 52806 main.go:143] &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:17.540581 52806 main.go:143] libmachine: About to run SSH command:
hostname
I1102 13:46:17.652272 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1102 13:46:17.652300 52806 buildroot.go:166] provisioning hostname "embed-certs-705938"
I1102 13:46:17.655559 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.656028 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.656056 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.656296 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:17.656546 52806 main.go:143] &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:17.656563 52806 main.go:143] libmachine: About to run SSH command:
sudo hostname embed-certs-705938 && echo "embed-certs-705938" | sudo tee /etc/hostname
I1102 13:46:17.787544 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: embed-certs-705938
I1102 13:46:17.790765 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.791180 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.791204 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.791416 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:17.791651 52806 main.go:143] &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:17.791669 52806 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sembed-certs-705938' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 embed-certs-705938/g' /etc/hosts;
else
echo '127.0.1.1 embed-certs-705938' | sudo tee -a /etc/hosts;
fi
fi
W1102 13:46:17.850464 52370 node_ready.go:57] node "no-preload-047294" has "Ready":"False" status (will retry)
I1102 13:46:18.162216 52370 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (2.140782403s)
I1102 13:46:18.162218 52370 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (1.783147201s)
I1102 13:46:18.162277 52370 addons.go:480] Verifying addon metrics-server=true in "no-preload-047294"
I1102 13:46:18.382893 52370 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (1.511434576s)
I1102 13:46:18.384606 52370 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p no-preload-047294 addons enable metrics-server
I1102 13:46:18.386338 52370 out.go:179] * Enabled addons: default-storageclass, storage-provisioner, metrics-server, dashboard
I1102 13:46:18.387422 52370 addons.go:515] duration metric: took 2.899233176s for enable addons: enabled=[default-storageclass storage-provisioner metrics-server dashboard]
I1102 13:46:19.849552 52370 node_ready.go:49] node "no-preload-047294" is "Ready"
I1102 13:46:19.849579 52370 node_ready.go:38] duration metric: took 4.004012408s for node "no-preload-047294" to be "Ready" ...
I1102 13:46:19.849594 52370 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:46:19.849637 52370 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:46:19.896495 52370 api_server.go:72] duration metric: took 4.408344456s to wait for apiserver process to appear ...
I1102 13:46:19.896524 52370 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:46:19.896551 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:19.907446 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 200:
ok
I1102 13:46:19.909066 52370 api_server.go:141] control plane version: v1.34.1
I1102 13:46:19.909101 52370 api_server.go:131] duration metric: took 12.568216ms to wait for apiserver health ...
I1102 13:46:19.909111 52370 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:46:19.920317 52370 system_pods.go:59] 8 kube-system pods found
I1102 13:46:19.920358 52370 system_pods.go:61] "coredns-66bc5c9577-th5tq" [65f5112e-1f3c-4f25-b91a-aa016db03acd] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:46:19.920365 52370 system_pods.go:61] "etcd-no-preload-047294" [150d4c86-f602-4a05-a8d3-6f54c6402abc] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:46:19.920375 52370 system_pods.go:61] "kube-apiserver-no-preload-047294" [6d35fe6a-5edf-4a16-a84f-fd8f527f48fe] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:46:19.920380 52370 system_pods.go:61] "kube-controller-manager-no-preload-047294" [888bc1fb-1c42-44a9-aa46-ba3e9ee49ee4] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:46:19.920389 52370 system_pods.go:61] "kube-proxy-nw5rx" [5dc5f78c-c165-402a-b834-f2f64e5ac4e2] Running
I1102 13:46:19.920395 52370 system_pods.go:61] "kube-scheduler-no-preload-047294" [af7ccc77-6a6b-4960-b6e4-63c69def3029] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:46:19.920400 52370 system_pods.go:61] "metrics-server-746fcd58dc-6pkxd" [65a57acb-e4e7-4fec-b299-0ae0667ed73a] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:46:19.920405 52370 system_pods.go:61] "storage-provisioner" [d6458b18-8300-444e-9e82-75cb7ba64d82] Running
I1102 13:46:19.920412 52370 system_pods.go:74] duration metric: took 11.217536ms to wait for pod list to return data ...
I1102 13:46:19.920422 52370 default_sa.go:34] waiting for default service account to be created ...
I1102 13:46:19.924360 52370 default_sa.go:45] found service account: "default"
I1102 13:46:19.924386 52370 default_sa.go:55] duration metric: took 3.955386ms for default service account to be created ...
I1102 13:46:19.924397 52370 system_pods.go:116] waiting for k8s-apps to be running ...
I1102 13:46:19.927824 52370 system_pods.go:86] 8 kube-system pods found
I1102 13:46:19.927857 52370 system_pods.go:89] "coredns-66bc5c9577-th5tq" [65f5112e-1f3c-4f25-b91a-aa016db03acd] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:46:19.927868 52370 system_pods.go:89] "etcd-no-preload-047294" [150d4c86-f602-4a05-a8d3-6f54c6402abc] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:46:19.927881 52370 system_pods.go:89] "kube-apiserver-no-preload-047294" [6d35fe6a-5edf-4a16-a84f-fd8f527f48fe] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:46:19.927894 52370 system_pods.go:89] "kube-controller-manager-no-preload-047294" [888bc1fb-1c42-44a9-aa46-ba3e9ee49ee4] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:46:19.927904 52370 system_pods.go:89] "kube-proxy-nw5rx" [5dc5f78c-c165-402a-b834-f2f64e5ac4e2] Running
I1102 13:46:19.927914 52370 system_pods.go:89] "kube-scheduler-no-preload-047294" [af7ccc77-6a6b-4960-b6e4-63c69def3029] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:46:19.927924 52370 system_pods.go:89] "metrics-server-746fcd58dc-6pkxd" [65a57acb-e4e7-4fec-b299-0ae0667ed73a] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:46:19.927933 52370 system_pods.go:89] "storage-provisioner" [d6458b18-8300-444e-9e82-75cb7ba64d82] Running
I1102 13:46:19.927943 52370 system_pods.go:126] duration metric: took 3.538982ms to wait for k8s-apps to be running ...
I1102 13:46:19.927955 52370 system_svc.go:44] waiting for kubelet service to be running ....
I1102 13:46:19.928014 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1102 13:46:19.954056 52370 system_svc.go:56] duration metric: took 26.090682ms WaitForService to wait for kubelet
I1102 13:46:19.954091 52370 kubeadm.go:587] duration metric: took 4.465942504s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1102 13:46:19.954114 52370 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:46:19.957563 52370 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:46:19.957595 52370 node_conditions.go:123] node cpu capacity is 2
I1102 13:46:19.957612 52370 node_conditions.go:105] duration metric: took 3.491442ms to run NodePressure ...
I1102 13:46:19.957627 52370 start.go:242] waiting for startup goroutines ...
I1102 13:46:19.957643 52370 start.go:247] waiting for cluster config update ...
I1102 13:46:19.957658 52370 start.go:256] writing updated cluster config ...
I1102 13:46:19.957996 52370 ssh_runner.go:195] Run: rm -f paused
I1102 13:46:19.963044 52370 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1102 13:46:19.966820 52370 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-th5tq" in "kube-system" namespace to be "Ready" or be gone ...
W1102 13:46:21.973445 52370 pod_ready.go:104] pod "coredns-66bc5c9577-th5tq" is not "Ready", error: <nil>
I1102 13:46:17.916169 52806 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:46:17.916200 52806 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21808-9383/.minikube CaCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21808-9383/.minikube}
I1102 13:46:17.916222 52806 buildroot.go:174] setting up certificates
I1102 13:46:17.916235 52806 provision.go:84] configureAuth start
I1102 13:46:17.919973 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.920554 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.920609 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.923643 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.924156 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.924187 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.924321 52806 provision.go:143] copyHostCerts
I1102 13:46:17.924394 52806 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem, removing ...
I1102 13:46:17.924416 52806 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem
I1102 13:46:17.924496 52806 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem (1123 bytes)
I1102 13:46:17.924606 52806 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem, removing ...
I1102 13:46:17.924617 52806 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem
I1102 13:46:17.924654 52806 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem (1675 bytes)
I1102 13:46:17.924721 52806 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem, removing ...
I1102 13:46:17.924729 52806 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem
I1102 13:46:17.924760 52806 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem (1082 bytes)
I1102 13:46:17.924820 52806 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem org=jenkins.embed-certs-705938 san=[127.0.0.1 192.168.50.135 embed-certs-705938 localhost minikube]
I1102 13:46:17.990201 52806 provision.go:177] copyRemoteCerts
I1102 13:46:17.990258 52806 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1102 13:46:17.993216 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.993704 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.993740 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.993903 52806 sshutil.go:53] new ssh client: &{IP:192.168.50.135 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/id_rsa Username:docker}
I1102 13:46:18.081294 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1102 13:46:18.121653 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
I1102 13:46:18.171219 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1102 13:46:18.212624 52806 provision.go:87] duration metric: took 296.37323ms to configureAuth
I1102 13:46:18.212658 52806 buildroot.go:189] setting minikube options for container-runtime
I1102 13:46:18.212891 52806 config.go:182] Loaded profile config "embed-certs-705938": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:46:18.216296 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.216768 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:18.216797 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.217050 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:18.217275 52806 main.go:143] &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:18.217289 52806 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1102 13:46:18.332652 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1102 13:46:18.332676 52806 buildroot.go:70] root file system type: tmpfs
I1102 13:46:18.332790 52806 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1102 13:46:18.336170 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.336665 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:18.336698 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.336965 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:18.337216 52806 main.go:143] &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:18.337304 52806 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1102 13:46:18.469436 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1102 13:46:18.472332 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.472755 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:18.472785 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.472963 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:18.473174 52806 main.go:143] &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:18.473190 52806 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1102 13:46:19.439525 52806 main.go:143] SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1102 13:46:19.439552 52806 machine.go:97] duration metric: took 1.902770314s to provisionDockerMachine
I1102 13:46:19.439563 52806 client.go:176] duration metric: took 19.960023s to LocalClient.Create
I1102 13:46:19.439582 52806 start.go:167] duration metric: took 19.960095763s to libmachine.API.Create "embed-certs-705938"
I1102 13:46:19.439592 52806 start.go:293] postStartSetup for "embed-certs-705938" (driver="kvm2")
I1102 13:46:19.439604 52806 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1102 13:46:19.439663 52806 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1102 13:46:19.443408 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.445129 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:19.445175 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.445388 52806 sshutil.go:53] new ssh client: &{IP:192.168.50.135 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/id_rsa Username:docker}
I1102 13:46:19.536215 52806 ssh_runner.go:195] Run: cat /etc/os-release
I1102 13:46:19.541490 52806 info.go:137] Remote host: Buildroot 2025.02
I1102 13:46:19.541514 52806 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/addons for local assets ...
I1102 13:46:19.541571 52806 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/files for local assets ...
I1102 13:46:19.541640 52806 filesync.go:149] local asset: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem -> 132702.pem in /etc/ssl/certs
I1102 13:46:19.541722 52806 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1102 13:46:19.553427 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:46:19.583514 52806 start.go:296] duration metric: took 143.906312ms for postStartSetup
I1102 13:46:19.586330 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.586696 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:19.586720 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.586940 52806 profile.go:143] Saving config to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/config.json ...
I1102 13:46:19.587159 52806 start.go:128] duration metric: took 20.109926215s to createHost
I1102 13:46:19.589294 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.589616 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:19.589639 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.589812 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:19.590068 52806 main.go:143] &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:19.590081 52806 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1102 13:46:19.697000 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: 1762091179.664541650
I1102 13:46:19.697035 52806 fix.go:216] guest clock: 1762091179.664541650
I1102 13:46:19.697045 52806 fix.go:229] Guest: 2025-11-02 13:46:19.66454165 +0000 UTC Remote: 2025-11-02 13:46:19.587172147 +0000 UTC m=+51.820715414 (delta=77.369503ms)
I1102 13:46:19.697069 52806 fix.go:200] guest clock delta is within tolerance: 77.369503ms
I1102 13:46:19.697075 52806 start.go:83] releasing machines lock for "embed-certs-705938", held for 20.220009213s
I1102 13:46:19.700081 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.700529 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:19.700564 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.700817 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:46:19.700870 52806 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:46:19.700882 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:46:19.700918 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:46:19.700956 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:46:19.700992 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:46:19.701052 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:46:19.701138 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:46:19.703264 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.703658 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:19.703684 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.703829 52806 sshutil.go:53] new ssh client: &{IP:192.168.50.135 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/id_rsa Username:docker}
I1102 13:46:19.809921 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:46:19.839404 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:46:19.869848 52806 ssh_runner.go:195] Run: openssl version
I1102 13:46:19.880751 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:46:19.898132 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:19.905270 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:19.905327 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:19.914387 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:46:19.928810 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:46:19.942549 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:46:19.947838 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:46:19.947903 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:46:19.956624 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:46:19.971483 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:46:19.986416 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:46:19.991713 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:46:19.991778 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:46:20.000084 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:46:20.014637 52806 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-certificates >/dev/null 2>&1 && sudo update-ca-certificates || true"
I1102 13:46:20.019384 52806 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-trust >/dev/null 2>&1 && sudo update-ca-trust extract || true"
I1102 13:46:20.024376 52806 ssh_runner.go:195] Run: cat /version.json
I1102 13:46:20.024475 52806 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1102 13:46:20.037036 52806 ssh_runner.go:195] Run: systemctl --version
I1102 13:46:20.059823 52806 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1102 13:46:20.066621 52806 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1102 13:46:20.066689 52806 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1102 13:46:20.091927 52806 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1102 13:46:20.091955 52806 start.go:496] detecting cgroup driver to use...
I1102 13:46:20.092085 52806 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:46:20.118748 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1102 13:46:20.133743 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1102 13:46:20.149619 52806 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1102 13:46:20.149693 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1102 13:46:20.164558 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:46:20.184822 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1102 13:46:20.201265 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:46:20.220488 52806 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1102 13:46:20.250102 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1102 13:46:20.278612 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1102 13:46:20.309477 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1102 13:46:20.327666 52806 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1102 13:46:20.340974 52806 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1102 13:46:20.341039 52806 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1102 13:46:20.354897 52806 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1102 13:46:20.369758 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:20.554281 52806 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1102 13:46:20.607028 52806 start.go:496] detecting cgroup driver to use...
I1102 13:46:20.607123 52806 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1102 13:46:20.628869 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:46:20.647644 52806 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1102 13:46:20.669638 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:46:20.687700 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:46:20.705265 52806 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1102 13:46:20.744370 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:46:20.763516 52806 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:46:20.788424 52806 ssh_runner.go:195] Run: which cri-dockerd
I1102 13:46:20.793415 52806 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1102 13:46:20.810121 52806 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1102 13:46:20.832924 52806 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1102 13:46:20.998814 52806 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1102 13:46:21.155552 52806 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1102 13:46:21.155678 52806 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1102 13:46:21.177952 52806 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1102 13:46:21.193864 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:21.345292 52806 ssh_runner.go:195] Run: sudo systemctl restart docker
I1102 13:46:21.823497 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1102 13:46:21.839482 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1102 13:46:21.855805 52806 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I1102 13:46:21.875591 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:21.891152 52806 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1102 13:46:22.040770 52806 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1102 13:46:22.187323 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:22.362141 52806 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1102 13:46:22.413868 52806 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:46:22.429826 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:22.603137 52806 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:46:22.723299 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:22.746135 52806 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1102 13:46:22.746228 52806 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1102 13:46:22.755329 52806 start.go:564] Will wait 60s for crictl version
I1102 13:46:22.755401 52806 ssh_runner.go:195] Run: which crictl
I1102 13:46:22.761309 52806 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1102 13:46:22.817847 52806 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1102 13:46:22.817936 52806 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1102 13:46:22.848957 52806 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
W1102 13:46:23.976265 52370 pod_ready.go:104] pod "coredns-66bc5c9577-th5tq" is not "Ready", error: <nil>
I1102 13:46:24.975676 52370 pod_ready.go:94] pod "coredns-66bc5c9577-th5tq" is "Ready"
I1102 13:46:24.975717 52370 pod_ready.go:86] duration metric: took 5.00887023s for pod "coredns-66bc5c9577-th5tq" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:24.979834 52370 pod_ready.go:83] waiting for pod "etcd-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:24.995163 52370 pod_ready.go:94] pod "etcd-no-preload-047294" is "Ready"
I1102 13:46:24.995215 52370 pod_ready.go:86] duration metric: took 15.356364ms for pod "etcd-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:25.003843 52370 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
W1102 13:46:27.014037 52370 pod_ready.go:104] pod "kube-apiserver-no-preload-047294" is not "Ready", error: <nil>
I1102 13:46:22.882470 52806 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1102 13:46:22.886180 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:22.886769 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:22.886809 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:22.887103 52806 ssh_runner.go:195] Run: grep 192.168.50.1 host.minikube.internal$ /etc/hosts
I1102 13:46:22.892228 52806 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.50.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:46:22.908232 52806 kubeadm.go:884] updating cluster {Name:embed-certs-705938 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-705938 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.50.135 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1102 13:46:22.908443 52806 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1102 13:46:22.908529 52806 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:46:22.931739 52806 docker.go:691] Got preloaded images:
I1102 13:46:22.931772 52806 docker.go:697] registry.k8s.io/kube-apiserver:v1.34.1 wasn't preloaded
I1102 13:46:22.931831 52806 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
I1102 13:46:22.944369 52806 ssh_runner.go:195] Run: which lz4
I1102 13:46:22.949025 52806 ssh_runner.go:195] Run: stat -c "%s %y" /preloaded.tar.lz4
I1102 13:46:22.954479 52806 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%s %y" /preloaded.tar.lz4: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/preloaded.tar.lz4': No such file or directory
I1102 13:46:22.954523 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 --> /preloaded.tar.lz4 (353378914 bytes)
I1102 13:46:24.467441 52806 docker.go:655] duration metric: took 1.518464633s to copy over tarball
I1102 13:46:24.467523 52806 ssh_runner.go:195] Run: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4
I1102 13:46:25.935982 52806 ssh_runner.go:235] Completed: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4: (1.468432911s)
I1102 13:46:25.936019 52806 ssh_runner.go:146] rm: /preloaded.tar.lz4
I1102 13:46:25.973464 52806 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
I1102 13:46:25.985858 52806 ssh_runner.go:362] scp memory --> /var/lib/docker/image/overlay2/repositories.json (2632 bytes)
I1102 13:46:26.006812 52806 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1102 13:46:26.021974 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:26.196874 52806 ssh_runner.go:195] Run: sudo systemctl restart docker
I1102 13:46:29.016780 52370 pod_ready.go:94] pod "kube-apiserver-no-preload-047294" is "Ready"
I1102 13:46:29.016820 52370 pod_ready.go:86] duration metric: took 4.01294547s for pod "kube-apiserver-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.021107 52370 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.027163 52370 pod_ready.go:94] pod "kube-controller-manager-no-preload-047294" is "Ready"
I1102 13:46:29.027191 52370 pod_ready.go:86] duration metric: took 6.0502ms for pod "kube-controller-manager-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.030257 52370 pod_ready.go:83] waiting for pod "kube-proxy-nw5rx" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.035459 52370 pod_ready.go:94] pod "kube-proxy-nw5rx" is "Ready"
I1102 13:46:29.035493 52370 pod_ready.go:86] duration metric: took 5.206198ms for pod "kube-proxy-nw5rx" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.039535 52370 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.238466 52370 pod_ready.go:94] pod "kube-scheduler-no-preload-047294" is "Ready"
I1102 13:46:29.238500 52370 pod_ready.go:86] duration metric: took 198.942184ms for pod "kube-scheduler-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.238516 52370 pod_ready.go:40] duration metric: took 9.275441602s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1102 13:46:29.299272 52370 start.go:628] kubectl: 1.34.1, cluster: 1.34.1 (minor skew: 0)
I1102 13:46:29.301171 52370 out.go:179] * Done! kubectl is now configured to use "no-preload-047294" cluster and "default" namespace by default
I1102 13:46:29.244892 52806 ssh_runner.go:235] Completed: sudo systemctl restart docker: (3.04797346s)
I1102 13:46:29.244991 52806 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:46:29.271075 52806 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1102 13:46:29.271116 52806 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:46:29.271130 52806 kubeadm.go:935] updating node { 192.168.50.135 8443 v1.34.1 docker true true} ...
I1102 13:46:29.271241 52806 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=embed-certs-705938 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.50.135
[Install]
config:
	{KubernetesVersion:v1.34.1 ClusterName:embed-certs-705938 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1102 13:46:29.271306 52806 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1102 13:46:29.330903 52806 cni.go:84] Creating CNI manager for ""
I1102 13:46:29.330935 52806 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:46:29.330949 52806 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1102 13:46:29.330975 52806 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.50.135 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:embed-certs-705938 NodeName:embed-certs-705938 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.50.135"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.50.135 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1102 13:46:29.331165 52806 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.50.135
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "embed-certs-705938"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.50.135"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.50.135"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1102 13:46:29.331238 52806 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1102 13:46:29.345091 52806 binaries.go:44] Found k8s binaries, skipping transfer
I1102 13:46:29.345162 52806 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1102 13:46:29.358857 52806 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (319 bytes)
I1102 13:46:29.382829 52806 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1102 13:46:29.407569 52806 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2225 bytes)
I1102 13:46:29.435296 52806 ssh_runner.go:195] Run: grep 192.168.50.135 control-plane.minikube.internal$ /etc/hosts
I1102 13:46:29.439901 52806 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.50.135	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:46:29.455934 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:29.619190 52806 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:46:29.653107 52806 certs.go:69] Setting up /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938 for IP: 192.168.50.135
I1102 13:46:29.653135 52806 certs.go:195] generating shared ca certs ...
I1102 13:46:29.653158 52806 certs.go:227] acquiring lock for ca certs: {Name:mk8ca472744959dc88f74e7c4ca834685146022e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:29.653386 52806 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key
I1102 13:46:29.653455 52806 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key
I1102 13:46:29.653470 52806 certs.go:257] generating profile certs ...
I1102 13:46:29.653543 52806 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.key
I1102 13:46:29.653561 52806 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.crt with IP's: []
I1102 13:46:29.893751 52806 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.crt ...
I1102 13:46:29.893784 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.crt: {Name:mk6a7ff5531c8d47f764b9b3f8a2b9684864662f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:29.893948 52806 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.key ...
I1102 13:46:29.893961 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.key: {Name:mk3e34eea0dea3b01b1d79fb7864361caeccf43b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:29.894037 52806 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key.ed6fd5b9
I1102 13:46:29.894054 52806 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt.ed6fd5b9 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.50.135]
I1102 13:46:30.525555 52806 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt.ed6fd5b9 ...
I1102 13:46:30.525583 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt.ed6fd5b9: {Name:mkb6d8b5233847de816b3d65ee8c7a12eff19517 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:30.525775 52806 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key.ed6fd5b9 ...
I1102 13:46:30.525797 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key.ed6fd5b9: {Name:mk20f67c0c009e78f2c3037ed1603d1fad28fffa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:30.525890 52806 certs.go:382] copying /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt.ed6fd5b9 -> /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt
I1102 13:46:30.525957 52806 certs.go:386] copying /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key.ed6fd5b9 -> /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key
I1102 13:46:30.526012 52806 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.key
I1102 13:46:30.526027 52806 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.crt with IP's: []
I1102 13:46:30.638271 52806 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.crt ...
I1102 13:46:30.638301 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.crt: {Name:mk3dac91a58b003a9db0fc034b457a850bea98c4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:30.638492 52806 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.key ...
I1102 13:46:30.638511 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.key: {Name:mkf0dc7f3c198ddf7eee23643f3e8cc58cf6505a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:30.638773 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:46:30.638826 52806 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:46:30.638838 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:46:30.638859 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:46:30.638881 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:46:30.638901 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:46:30.638939 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:46:30.639519 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1102 13:46:30.674138 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1102 13:46:30.703977 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1102 13:46:30.734195 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1102 13:46:30.764670 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1428 bytes)
I1102 13:46:30.794535 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1102 13:46:30.825092 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1102 13:46:30.856377 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1102 13:46:30.887933 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:46:30.919173 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:46:30.952120 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:46:30.984088 52806 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1102 13:46:31.005017 52806 ssh_runner.go:195] Run: openssl version
I1102 13:46:31.011314 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:46:31.024540 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:31.029805 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:31.029866 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:31.037318 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:46:31.048848 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:46:31.063024 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:46:31.068655 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:46:31.068711 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:46:31.076429 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:46:31.090294 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:46:31.106103 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:46:31.111708 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:46:31.111774 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:46:31.119459 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:46:31.133305 52806 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1102 13:46:31.138171 52806 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1102 13:46:31.138225 52806 kubeadm.go:401] StartCluster: {Name:embed-certs-705938 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-705938 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.50.135 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1102 13:46:31.138356 52806 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:46:31.159777 52806 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1102 13:46:31.174591 52806 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1102 13:46:31.188149 52806 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1102 13:46:31.199556 52806 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1102 13:46:31.199580 52806 kubeadm.go:158] found existing configuration files:
I1102 13:46:31.199642 52806 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1102 13:46:31.210416 52806 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1102 13:46:31.210474 52806 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1102 13:46:31.221683 52806 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1102 13:46:31.232410 52806 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1102 13:46:31.232471 52806 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1102 13:46:31.244414 52806 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1102 13:46:31.258650 52806 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1102 13:46:31.258712 52806 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1102 13:46:31.276559 52806 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1102 13:46:31.291616 52806 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1102 13:46:31.291687 52806 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1102 13:46:31.304373 52806 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem"
I1102 13:46:31.449184 52806 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
==> Docker <==
Nov 02 13:45:46 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:46.757521674Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:45:46 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:46.757621103Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:45:46 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:46.764517503Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Nov 02 13:45:46 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:46.764566038Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:45:48 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:45:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ef8db52292f8faa4336702a5689c4aadadefb916cb189c72d5c2b1fefc930db3/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Nov 02 13:45:48 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:45:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a0100eda4036460db98838fc48d07f6b6fd8700c11b989bf7487f9e41aa9668b/resolv.conf as [nameserver 192.168.122.1]"
Nov 02 13:45:59 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:59.663521843Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Nov 02 13:45:59 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:59.735231937Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Nov 02 13:45:59 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:59.735481063Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Nov 02 13:45:59 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:45:59Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Nov 02 13:46:02 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:02.601657354Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:02 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:02.601789643Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:02 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:02.605681369Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Nov 02 13:46:02 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:02.605739290Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:03 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:03.687296590Z" level=info msg="ignoring event" container=b95dfdd056106acfa9cdc1cf1ba50a3780d9d74986af960c5d2bdc429f93db22 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Nov 02 13:46:37 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:46:37Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Nov 02 13:46:37 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:46:37Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-kvgcr_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"282b9f80c34567e9a258a7104324cc16ebe1953cc5052244da256a206b63b697\""
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.294547278Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.295850957Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.300503039Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.300571465Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.418312647Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.502212145Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.502318651Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Nov 02 13:46:38 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:46:38Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
d3dc8b7583ba2 6e38f40d628db Less than a second ago Running storage-provisioner 2 6936b7f5fd891 storage-provisioner
b1dc1a770a414 56cc512116c8f 50 seconds ago Running busybox 1 ef8db52292f8f busybox
89d0786d3647a 52546a367cc9e 50 seconds ago Running coredns 1 a0100eda40364 coredns-66bc5c9577-bnv4n
1019700155db8 kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93 52 seconds ago Running kubernetes-dashboard 0 399dad2b92a36 kubernetes-dashboard-855c9754f9-2f7cj
b95dfdd056106 6e38f40d628db About a minute ago Exited storage-provisioner 1 6936b7f5fd891 storage-provisioner
8965cdb1826d1 fc25172553d79 About a minute ago Running kube-proxy 1 5d16aead1b590 kube-proxy-5qv84
5ce993c62ff5a 5f1f5298c888d About a minute ago Running etcd 1 387fbeb352456 etcd-default-k8s-diff-port-311562
a38000da02ecd c80c8dbafe7dd About a minute ago Running kube-controller-manager 1 e876eb1a5a629 kube-controller-manager-default-k8s-diff-port-311562
396a831dc8f66 7dd6aaa1717ab About a minute ago Running kube-scheduler 1 40e0522270828 kube-scheduler-default-k8s-diff-port-311562
5d1c57648acf7 c3994bc696102 About a minute ago Running kube-apiserver 1 bdda83de52444 kube-apiserver-default-k8s-diff-port-311562
08b56ffa53d8c gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e About a minute ago Exited busybox 0 c144cc94943b5 busybox
0bebcbbb6c23d 52546a367cc9e 2 minutes ago Exited coredns 0 dfd9d91f746b2 coredns-66bc5c9577-bnv4n
859f8a49dca1a fc25172553d79 2 minutes ago Exited kube-proxy 0 38a8172039765 kube-proxy-5qv84
326385a03f875 c3994bc696102 2 minutes ago Exited kube-apiserver 0 20a067500af02 kube-apiserver-default-k8s-diff-port-311562
3db15e0e0d1e0 7dd6aaa1717ab 2 minutes ago Exited kube-scheduler 0 9c5c22752c78c kube-scheduler-default-k8s-diff-port-311562
693e1d6c029a6 c80c8dbafe7dd 2 minutes ago Exited kube-controller-manager 0 4f4d927b9b110 kube-controller-manager-default-k8s-diff-port-311562
3d076807ce989 5f1f5298c888d 2 minutes ago Exited etcd 0 efae7bca22ce8 etcd-default-k8s-diff-port-311562
==> coredns [0bebcbbb6c23] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [89d0786d3647] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = ecad3ac8c72227dcf0d7a418ea5051ee155dd74d241a13c4787cc61906568517b5647c8519c78ef2c6b724422ee4b03d6cfb27e9a87140163726e83184faf782
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:35717 - 28729 "HINFO IN 7560457005306701952.7607275637384721463. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.111534194s
==> describe nodes <==
Name: default-k8s-diff-port-311562
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=default-k8s-diff-port-311562
kubernetes.io/os=linux
minikube.k8s.io/commit=170a9221ec214abbddb4c7cdac340516a92b239a
minikube.k8s.io/name=default-k8s-diff-port-311562
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_02T13_44_20_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 02 Nov 2025 13:44:16 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: default-k8s-diff-port-311562
AcquireTime: <unset>
RenewTime: Sun, 02 Nov 2025 13:46:37 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sun, 02 Nov 2025 13:46:37 +0000 Sun, 02 Nov 2025 13:44:14 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sun, 02 Nov 2025 13:46:37 +0000 Sun, 02 Nov 2025 13:44:14 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sun, 02 Nov 2025 13:46:37 +0000 Sun, 02 Nov 2025 13:44:14 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sun, 02 Nov 2025 13:46:37 +0000 Sun, 02 Nov 2025 13:45:39 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.83.253
Hostname: default-k8s-diff-port-311562
Capacity:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
System Info:
Machine ID: 96534bedb89b40eeaf23f1340aeb91ac
System UUID: 96534bed-b89b-40ee-af23-f1340aeb91ac
Boot ID: ca22851d-b60f-48c3-875e-f0d6e84558c1
Kernel Version: 6.6.95
OS Image: Buildroot 2025.02
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.5.1
Kubelet Version: v1.34.1
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m
kube-system coredns-66bc5c9577-bnv4n 100m (5%) 0 (0%) 70Mi (2%) 170Mi (5%) 2m13s
kube-system etcd-default-k8s-diff-port-311562 100m (5%) 0 (0%) 100Mi (3%) 0 (0%) 2m18s
kube-system kube-apiserver-default-k8s-diff-port-311562 250m (12%) 0 (0%) 0 (0%) 0 (0%) 2m18s
kube-system kube-controller-manager-default-k8s-diff-port-311562 200m (10%) 0 (0%) 0 (0%) 0 (0%) 2m21s
kube-system kube-proxy-5qv84 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m14s
kube-system kube-scheduler-default-k8s-diff-port-311562 100m (5%) 0 (0%) 0 (0%) 0 (0%) 2m18s
kube-system metrics-server-746fcd58dc-tcttv 100m (5%) 0 (0%) 200Mi (6%) 0 (0%) 109s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m10s
kubernetes-dashboard dashboard-metrics-scraper-6ffb444bf9-tk9xk 0 (0%) 0 (0%) 0 (0%) 0 (0%) 62s
kubernetes-dashboard kubernetes-dashboard-855c9754f9-2f7cj 0 (0%) 0 (0%) 0 (0%) 0 (0%) 62s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 0 (0%)
memory 370Mi (12%) 170Mi (5%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 2m11s kube-proxy
Normal Starting 65s kube-proxy
Normal NodeHasNoDiskPressure 2m18s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasNoDiskPressure
Normal NodeAllocatableEnforced 2m18s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 2m18s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientMemory
Normal NodeHasSufficientPID 2m18s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientPID
Normal Starting 2m18s kubelet Starting kubelet.
Normal NodeReady 2m17s kubelet Node default-k8s-diff-port-311562 status is now: NodeReady
Normal RegisteredNode 2m14s node-controller Node default-k8s-diff-port-311562 event: Registered Node default-k8s-diff-port-311562 in Controller
Normal NodeHasNoDiskPressure 71s (x8 over 71s) kubelet Node default-k8s-diff-port-311562 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientMemory 71s (x8 over 71s) kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientMemory
Normal Starting 71s kubelet Starting kubelet.
Normal NodeHasSufficientPID 71s (x7 over 71s) kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 71s kubelet Updated Node Allocatable limit across pods
Warning Rebooted 66s kubelet Node default-k8s-diff-port-311562 has been rebooted, boot id: ca22851d-b60f-48c3-875e-f0d6e84558c1
Normal RegisteredNode 63s node-controller Node default-k8s-diff-port-311562 event: Registered Node default-k8s-diff-port-311562 in Controller
Normal Starting 2s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 1s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 1s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 1s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 1s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientPID
==> dmesg <==
[Nov 2 13:45] Booted with the nomodeset parameter. Only the system framebuffer will be available
[ +0.000007] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.000053] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +0.006822] (rpcbind)[118]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
[ +0.984941] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
[ +0.000025] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
[ +0.000002] NFSD: Unable to initialize client recovery tracking! (-2)
[ +0.129738] kauditd_printk_skb: 161 callbacks suppressed
[ +0.100494] kauditd_printk_skb: 289 callbacks suppressed
[ +5.715230] kauditd_printk_skb: 165 callbacks suppressed
[ +4.746976] kauditd_printk_skb: 134 callbacks suppressed
[ +6.790105] kauditd_printk_skb: 141 callbacks suppressed
[Nov 2 13:46] kauditd_printk_skb: 132 callbacks suppressed
[ +0.215790] kauditd_printk_skb: 35 callbacks suppressed
==> etcd [3d076807ce98] <==
	{"level":"warn","ts":"2025-11-02T13:44:28.710383Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"132.249203ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/storageclasses/standard\" limit:1 ","response":"range_response_count:1 size:992"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:28.710431Z","caller":"traceutil/trace.go:172","msg":"trace[77773902] range","detail":"{range_begin:/registry/storageclasses/standard; range_end:; response_count:1; response_revision:362; }","duration":"132.31132ms","start":"2025-11-02T13:44:28.578110Z","end":"2025-11-02T13:44:28.710421Z","steps":["trace[77773902] 'agreement among raft nodes before linearized reading'  (duration: 113.91205ms)","trace[77773902] 'range keys from in-memory index tree'  (duration: 18.249766ms)"],"step_count":2}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:28.710599Z","caller":"traceutil/trace.go:172","msg":"trace[35326147] transaction","detail":"{read_only:false; response_revision:364; number_of_response:1; }","duration":"146.846351ms","start":"2025-11-02T13:44:28.563740Z","end":"2025-11-02T13:44:28.710586Z","steps":["trace[35326147] 'process raft request'  (duration: 146.798823ms)"],"step_count":1}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:28.710750Z","caller":"traceutil/trace.go:172","msg":"trace[1099436701] transaction","detail":"{read_only:false; response_revision:363; number_of_response:1; }","duration":"150.90502ms","start":"2025-11-02T13:44:28.559836Z","end":"2025-11-02T13:44:28.710741Z","steps":["trace[1099436701] 'process raft request'  (duration: 132.240173ms)","trace[1099436701] 'compare'  (duration: 18.185172ms)"],"step_count":2}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:44:28.711806Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"120.468313ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods/kube-system/kube-scheduler-default-k8s-diff-port-311562\" limit:1 ","response":"range_response_count:1 size:5058"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:28.713796Z","caller":"traceutil/trace.go:172","msg":"trace[1836530902] range","detail":"{range_begin:/registry/pods/kube-system/kube-scheduler-default-k8s-diff-port-311562; range_end:; response_count:1; response_revision:364; }","duration":"122.467813ms","start":"2025-11-02T13:44:28.591320Z","end":"2025-11-02T13:44:28.713788Z","steps":["trace[1836530902] 'agreement among raft nodes before linearized reading'  (duration: 120.401561ms)"],"step_count":1}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:28.713620Z","caller":"traceutil/trace.go:172","msg":"trace[996614801] transaction","detail":"{read_only:false; response_revision:365; number_of_response:1; }","duration":"119.843122ms","start":"2025-11-02T13:44:28.593765Z","end":"2025-11-02T13:44:28.713609Z","steps":["trace[996614801] 'process raft request'  (duration: 118.595346ms)"],"step_count":1}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:50.852268Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:50.852399Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"default-k8s-diff-port-311562","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.253:2380"],"advertise-client-urls":["https://192.168.83.253:2379"]}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:50.852551Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:57.856103Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:57.862277Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.863439Z","caller":"etcdserver/server.go:1281","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"499efbce2300e1e","current-leader-member-id":"499efbce2300e1e"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:44:57.863819Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:44:57.863947Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:57.864081Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:44:57.864504Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.83.253:2379: use of closed network connection"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:44:57.864685Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.83.253:2379: use of closed network connection"}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:57.864841Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.253:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.864930Z","caller":"etcdserver/server.go:2342","msg":"server has stopped; stopping storage version's monitor"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.865110Z","caller":"etcdserver/server.go:2319","msg":"server has stopped; stopping cluster version's monitor"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.869667Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.83.253:2380"}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:57.869751Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.253:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.869819Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.83.253:2380"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.869925Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"default-k8s-diff-port-311562","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.253:2380"],"advertise-client-urls":["https://192.168.83.253:2379"]}
                                                
                                                ==> etcd [5ce993c62ff5] <==
	{"level":"warn","ts":"2025-11-02T13:45:31.330145Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:38946","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.343996Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:38960","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.361984Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:38976","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.375631Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:38994","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.390071Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39026","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.405435Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39042","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.419030Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39072","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.439543Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39092","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.447139Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39122","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.455878Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39130","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.466974Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39158","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.484514Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39168","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.498690Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39186","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.528507Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39198","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.562814Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39216","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.571971Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39232","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.583731Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39250","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.593158Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39254","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.610839Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39284","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.670266Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39302","server-name":"","error":"EOF"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:45:46.220299Z","caller":"traceutil/trace.go:172","msg":"trace[64781144] linearizableReadLoop","detail":"{readStateIndex:691; appliedIndex:691; }","duration":"376.246241ms","start":"2025-11-02T13:45:45.844031Z","end":"2025-11-02T13:45:46.220277Z","steps":["trace[64781144] 'read index received'  (duration: 376.241187ms)","trace[64781144] 'applied index is now lower than readState.Index'  (duration: 4.383µs)"],"step_count":2}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:46.220560Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"376.526623ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:45:46.220643Z","caller":"traceutil/trace.go:172","msg":"trace[815114890] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:652; }","duration":"376.698548ms","start":"2025-11-02T13:45:45.843937Z","end":"2025-11-02T13:45:46.220635Z","steps":["trace[815114890] 'agreement among raft nodes before linearized reading'  (duration: 376.504996ms)"],"step_count":1}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:46.222967Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"194.912257ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods/kube-system/coredns-66bc5c9577-bnv4n\" limit:1 ","response":"range_response_count:1 size:5703"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:45:46.223153Z","caller":"traceutil/trace.go:172","msg":"trace[59260110] range","detail":"{range_begin:/registry/pods/kube-system/coredns-66bc5c9577-bnv4n; range_end:; response_count:1; response_revision:652; }","duration":"195.053917ms","start":"2025-11-02T13:45:46.028034Z","end":"2025-11-02T13:45:46.223088Z","steps":["trace[59260110] 'agreement among raft nodes before linearized reading'  (duration: 194.846556ms)"],"step_count":1}
                                                
                                                ==> kernel <==
13:46:39 up 1 min, 0 users, load average: 1.71, 0.58, 0.21
Linux default-k8s-diff-port-311562 6.6.95 #1 SMP PREEMPT_DYNAMIC Tue Oct 28 16:58:05 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2025.02"
==> kube-apiserver [326385a03f87] <==
W1102 13:45:00.210906       1 logging.go:55] [core] [Channel #139 SubChannel #141]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.243433       1 logging.go:55] [core] [Channel #1 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.262428       1 logging.go:55] [core] [Channel #179 SubChannel #181]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.318835       1 logging.go:55] [core] [Channel #4 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.419905       1 logging.go:55] [core] [Channel #115 SubChannel #117]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.423645       1 logging.go:55] [core] [Channel #13 SubChannel #15]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.432493       1 logging.go:55] [core] [Channel #251 SubChannel #253]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.443372       1 logging.go:55] [core] [Channel #247 SubChannel #249]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.498105       1 logging.go:55] [core] [Channel #183 SubChannel #185]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.538015       1 logging.go:55] [core] [Channel #171 SubChannel #173]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.543596       1 logging.go:55] [core] [Channel #27 SubChannel #29]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.556876       1 logging.go:55] [core] [Channel #175 SubChannel #177]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.604554       1 logging.go:55] [core] [Channel #111 SubChannel #113]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.621116       1 logging.go:55] [core] [Channel #63 SubChannel #65]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.632390       1 logging.go:55] [core] [Channel #187 SubChannel #189]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.632397       1 logging.go:55] [core] [Channel #239 SubChannel #241]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.664868       1 logging.go:55] [core] [Channel #59 SubChannel #61]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.713717       1 logging.go:55] [core] [Channel #39 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.731482       1 logging.go:55] [core] [Channel #235 SubChannel #237]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.751716       1 logging.go:55] [core] [Channel #227 SubChannel #229]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.776547       1 logging.go:55] [core] [Channel #131 SubChannel #133]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.830551       1 logging.go:55] [core] [Channel #67 SubChannel #69]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.861394       1 logging.go:55] [core] [Channel #87 SubChannel #89]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.893564       1 logging.go:55] [core] [Channel #195 SubChannel #197]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.920873       1 logging.go:55] [core] [Channel #91 SubChannel #93]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
                                                ==> kube-apiserver [5d1c57648acf] <==
W1102 13:45:33.374282 1 handler_proxy.go:99] no RequestInfo found in the context
E1102 13:45:33.374620 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1102 13:45:33.374646 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
I1102 13:45:34.384503 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1102 13:45:34.454503 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I1102 13:45:34.526510 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1102 13:45:34.539619 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1102 13:45:35.849585 1 controller.go:667] quota admission added evaluator for: endpoints
I1102 13:45:36.004255 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I1102 13:45:36.196794 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1102 13:45:36.547782 1 controller.go:667] quota admission added evaluator for: namespaces
I1102 13:45:36.984810       1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.102.109.123"}
I1102 13:45:37.005946       1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.98.142.247"}
                                                W1102 13:46:36.461564 1 handler_proxy.go:99] no RequestInfo found in the context
E1102 13:46:36.461774 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1102 13:46:36.461799 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
W1102 13:46:36.490050 1 handler_proxy.go:99] no RequestInfo found in the context
E1102 13:46:36.491243 1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
I1102 13:46:36.491555 1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
==> kube-controller-manager [693e1d6c029a] <==
I1102 13:44:24.032230 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I1102 13:44:24.033458 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1102 13:44:24.034449 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1102 13:44:24.043978 1 shared_informer.go:356] "Caches are synced" controller="endpoint"
I1102 13:44:24.052411 1 shared_informer.go:356] "Caches are synced" controller="namespace"
I1102 13:44:24.065329 1 shared_informer.go:356] "Caches are synced" controller="job"
I1102 13:44:24.071035 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1102 13:44:24.071083 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1102 13:44:24.071089 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1102 13:44:24.072999 1 shared_informer.go:356] "Caches are synced" controller="persistent volume"
I1102 13:44:24.073369 1 shared_informer.go:356] "Caches are synced" controller="service account"
I1102 13:44:24.073659 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I1102 13:44:24.073772 1 shared_informer.go:356] "Caches are synced" controller="cronjob"
I1102 13:44:24.074694 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
I1102 13:44:24.082468 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1102 13:44:24.082876 1 shared_informer.go:356] "Caches are synced" controller="node"
I1102 13:44:24.085205 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I1102 13:44:24.085454 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I1102 13:44:24.085624 1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
I1102 13:44:24.085762 1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
I1102 13:44:24.089290 1 shared_informer.go:356] "Caches are synced" controller="taint"
I1102 13:44:24.089578 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I1102 13:44:24.089928 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="default-k8s-diff-port-311562"
I1102 13:44:24.092875 1 node_lifecycle_controller.go:1067] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal"
I1102 13:44:24.115542 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="default-k8s-diff-port-311562" podCIDRs=["10.244.0.0/24"]
==> kube-controller-manager [a38000da02ec] <==
I1102 13:45:35.871183 1 shared_informer.go:356] "Caches are synced" controller="HPA"
I1102 13:45:35.881200 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I1102 13:45:35.884409 1 shared_informer.go:356] "Caches are synced" controller="service-cidr-controller"
I1102 13:45:35.884481 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I1102 13:45:35.887846 1 shared_informer.go:356] "Caches are synced" controller="taint"
I1102 13:45:35.887856 1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
I1102 13:45:35.887966 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I1102 13:45:35.887981 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I1102 13:45:35.888061 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="default-k8s-diff-port-311562"
I1102 13:45:35.888110 1 node_lifecycle_controller.go:1025] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller"
I1102 13:45:35.888451 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1102 13:45:35.888461 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1102 13:45:35.888467 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1102 13:45:35.894403 1 shared_informer.go:356] "Caches are synced" controller="job"
I1102 13:45:35.907417 1 shared_informer.go:356] "Caches are synced" controller="disruption"
I1102 13:45:35.922833 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
E1102 13:45:36.762730 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1102 13:45:36.799547 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1102 13:45:36.816443 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1102 13:45:36.816681 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1102 13:45:36.832832 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1102 13:45:36.837400 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
I1102 13:45:40.889022 1 node_lifecycle_controller.go:1044] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
E1102 13:46:36.518593 1 resource_quota_controller.go:446] "Unhandled Error" err="unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: stale GroupVersion discovery: metrics.k8s.io/v1beta1" logger="UnhandledError"
I1102 13:46:36.537792 1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
==> kube-proxy [859f8a49dca1] <==
I1102 13:44:26.731509 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1102 13:44:26.834249 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1102 13:44:26.834385 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.253"]
E1102 13:44:26.837097 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1102 13:44:27.009331 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1102 13:44:27.009396 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1102 13:44:27.009428 1 server_linux.go:132] "Using iptables Proxier"
I1102 13:44:27.053522 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1102 13:44:27.054108 1 server.go:527] "Version info" version="v1.34.1"
I1102 13:44:27.054173 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1102 13:44:27.079784 1 config.go:200] "Starting service config controller"
I1102 13:44:27.079817 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1102 13:44:27.081693 1 config.go:106] "Starting endpoint slice config controller"
I1102 13:44:27.082792 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1102 13:44:27.082853 1 config.go:403] "Starting serviceCIDR config controller"
I1102 13:44:27.082878 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1102 13:44:27.085725 1 config.go:309] "Starting node config controller"
I1102 13:44:27.085738 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1102 13:44:27.181791 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1102 13:44:27.182915 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1102 13:44:27.183118 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1102 13:44:27.186936 1 shared_informer.go:356] "Caches are synced" controller="node config"
==> kube-proxy [8965cdb1826d] <==
I1102 13:45:33.789139 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1102 13:45:33.890490 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1102 13:45:33.890552 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.253"]
E1102 13:45:33.890670 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1102 13:45:33.941906 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1102 13:45:33.941978 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1102 13:45:33.942013 1 server_linux.go:132] "Using iptables Proxier"
I1102 13:45:33.952403 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1102 13:45:33.953896 1 server.go:527] "Version info" version="v1.34.1"
I1102 13:45:33.953939 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1102 13:45:33.966192 1 config.go:200] "Starting service config controller"
I1102 13:45:33.966301 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1102 13:45:33.966403 1 config.go:106] "Starting endpoint slice config controller"
I1102 13:45:33.966477 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1102 13:45:33.966505 1 config.go:403] "Starting serviceCIDR config controller"
I1102 13:45:33.966585 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1102 13:45:33.969664 1 config.go:309] "Starting node config controller"
I1102 13:45:33.969703 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1102 13:45:33.969711 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1102 13:45:34.066523 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1102 13:45:34.066534 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1102 13:45:34.066730 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
==> kube-scheduler [396a831dc8f6] <==
I1102 13:45:30.213095 1 serving.go:386] Generated self-signed cert in-memory
W1102 13:45:32.329054 1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W1102 13:45:32.329109 1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W1102 13:45:32.329124 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
W1102 13:45:32.329130 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1102 13:45:32.441927 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.1"
I1102 13:45:32.442042 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1102 13:45:32.445945 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1102 13:45:32.446004 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1102 13:45:32.446453 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1102 13:45:32.446673 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1102 13:45:32.547060 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kube-scheduler [3db15e0e0d1e] <==
E1102 13:44:16.975955 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E1102 13:44:16.976013 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E1102 13:44:16.976607 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1102 13:44:16.978344 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1102 13:44:17.808669 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E1102 13:44:17.882355 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1102 13:44:17.895093 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E1102 13:44:17.951002 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E1102 13:44:17.977073 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1102 13:44:17.986009 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1102 13:44:18.011730 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1102 13:44:18.046027 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1102 13:44:18.060213 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1102 13:44:18.120966 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E1102 13:44:18.133736 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1102 13:44:18.239035 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E1102 13:44:18.280548 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
E1102 13:44:18.416388 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
I1102 13:44:21.239623 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1102 13:44:50.998036 1 secure_serving.go:259] Stopped listening on 127.0.0.1:10259
I1102 13:44:50.998908 1 server.go:263] "[graceful-termination] secure server has stopped listening"
I1102 13:44:51.001979 1 server.go:265] "[graceful-termination] secure server is exiting"
I1102 13:44:51.002470 1 configmap_cafile_content.go:226] "Shutting down controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
E1102 13:44:51.002473 1 run.go:72] "command failed" err="finished without leader elect"
I1102 13:44:50.998192 1 tlsconfig.go:258] "Shutting down DynamicServingCertificateController"
==> kubelet <==
Nov 02 13:46:37 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:37.536624 4148 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c5c22752c78c30bcc6d738f36262c2e4e32844883048aaf960fcb123556e4dc"
Nov 02 13:46:37 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:37.536946 4148 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-default-k8s-diff-port-311562"
Nov 02 13:46:37 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:37.551278 4148 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-default-k8s-diff-port-311562\" already exists" pod="kube-system/kube-scheduler-default-k8s-diff-port-311562"
Nov 02 13:46:37 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:37.966742 4148 apiserver.go:52] "Watching apiserver"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.017306 4148 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.051537 4148 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/11842593-1fe8-476c-a692-ecdecf44fafa-xtables-lock\") pod \"kube-proxy-5qv84\" (UID: \"11842593-1fe8-476c-a692-ecdecf44fafa\") " pod="kube-system/kube-proxy-5qv84"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.051606 4148 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/11842593-1fe8-476c-a692-ecdecf44fafa-lib-modules\") pod \"kube-proxy-5qv84\" (UID: \"11842593-1fe8-476c-a692-ecdecf44fafa\") " pod="kube-system/kube-proxy-5qv84"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.051714 4148 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/ce313798-c158-4174-aec9-8d1e48caceea-tmp\") pod \"storage-provisioner\" (UID: \"ce313798-c158-4174-aec9-8d1e48caceea\") " pod="kube-system/storage-provisioner"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.278280 4148 scope.go:117] "RemoveContainer" containerID="b95dfdd056106acfa9cdc1cf1ba50a3780d9d74986af960c5d2bdc429f93db22"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.301712 4148 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.302320 4148 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.302814 4148 kuberuntime_manager.go:1449] "Unhandled Error" err="container metrics-server start failed in pod metrics-server-746fcd58dc-tcttv_kube-system(e9fc9174-d97e-4486-a4da-a405ebd4a7f3): ErrImagePull: Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" logger="UnhandledError"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.302892 4148 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"metrics-server\" with ErrImagePull: \"Error response from daemon: Get \\\"https://fake.domain/v2/\\\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host\"" pod="kube-system/metrics-server-746fcd58dc-tcttv" podUID="e9fc9174-d97e-4486-a4da-a405ebd4a7f3"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.511095 4148 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.511160 4148 kuberuntime_image.go:43] "Failed to pull image" err="Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.511235 4148 kuberuntime_manager.go:1449] "Unhandled Error" err="container dashboard-metrics-scraper start failed in pod dashboard-metrics-scraper-6ffb444bf9-tk9xk_kubernetes-dashboard(dfc3630d-85e5-4ff8-8cd0-d0de23d3753a): ErrImagePull: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" logger="UnhandledError"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.511277 4148 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ErrImagePull: \"Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9-tk9xk" podUID="dfc3630d-85e5-4ff8-8cd0-d0de23d3753a"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.599294 4148 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.599738 4148 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.600064 4148 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.600308 4148 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.640609 4148 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-default-k8s-diff-port-311562\" already exists" pod="kube-system/kube-controller-manager-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.645224 4148 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-default-k8s-diff-port-311562\" already exists" pod="kube-system/kube-scheduler-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.650457 4148 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-default-k8s-diff-port-311562\" already exists" pod="kube-system/etcd-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.660589 4148 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-default-k8s-diff-port-311562\" already exists" pod="kube-system/kube-apiserver-default-k8s-diff-port-311562"
==> kubernetes-dashboard [1019700155db] <==
2025/11/02 13:45:46 Starting overwatch
2025/11/02 13:45:46 Using namespace: kubernetes-dashboard
2025/11/02 13:45:46 Using in-cluster config to connect to apiserver
2025/11/02 13:45:46 Using secret token for csrf signing
2025/11/02 13:45:46 Initializing csrf token from kubernetes-dashboard-csrf secret
2025/11/02 13:45:46 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
2025/11/02 13:45:46 Successful initial request to the apiserver, version: v1.34.1
2025/11/02 13:45:46 Generating JWE encryption key
2025/11/02 13:45:46 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
2025/11/02 13:45:46 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
2025/11/02 13:45:47 Initializing JWE encryption key from synchronized object
2025/11/02 13:45:47 Creating in-cluster Sidecar client
2025/11/02 13:45:47 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2025/11/02 13:45:47 Serving insecurely on HTTP port: 9090
2025/11/02 13:46:36 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
==> storage-provisioner [b95dfdd05610] <==
I1102 13:45:33.650270 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F1102 13:46:03.661740 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
==> storage-provisioner [d3dc8b7583ba] <==
I1102 13:46:38.584324 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1102 13:46:38.609941 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1102 13:46:38.610912 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
W1102 13:46:38.628854 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
-- /stdout --
helpers_test.go:262: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-311562 -n default-k8s-diff-port-311562
helpers_test.go:269: (dbg) Run:  kubectl --context default-k8s-diff-port-311562 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: metrics-server-746fcd58dc-tcttv dashboard-metrics-scraper-6ffb444bf9-tk9xk
helpers_test.go:282: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context default-k8s-diff-port-311562 describe pod metrics-server-746fcd58dc-tcttv dashboard-metrics-scraper-6ffb444bf9-tk9xk
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context default-k8s-diff-port-311562 describe pod metrics-server-746fcd58dc-tcttv dashboard-metrics-scraper-6ffb444bf9-tk9xk: exit status 1 (65.823158ms)
** stderr **
Error from server (NotFound): pods "metrics-server-746fcd58dc-tcttv" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-6ffb444bf9-tk9xk" not found
** /stderr **
helpers_test.go:287: kubectl --context default-k8s-diff-port-311562 describe pod metrics-server-746fcd58dc-tcttv dashboard-metrics-scraper-6ffb444bf9-tk9xk: exit status 1
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:247: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-311562 -n default-k8s-diff-port-311562
helpers_test.go:252: <<< TestStartStop/group/default-k8s-diff-port/serial/Pause FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p default-k8s-diff-port-311562 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p default-k8s-diff-port-311562 logs -n 25: (1.467701783s)
helpers_test.go:260: TestStartStop/group/default-k8s-diff-port/serial/Pause logs:
-- stdout --
==> Audit <==
┌─────────┬───────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼───────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ delete │ -p newest-cni-147975 │ newest-cni-147975 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ image │ default-k8s-diff-port-311562 image list --format=json │ default-k8s-diff-port-311562 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ pause │ -p default-k8s-diff-port-311562 --alsologtostderr -v=1 │ default-k8s-diff-port-311562 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ delete │ -p newest-cni-147975 │ newest-cni-147975 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which crictl │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which rsync │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which VBoxService │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which VBoxControl │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which wget │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which socat │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which git │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which podman │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which iptables │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which docker │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh which curl │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /data | grep /data │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/minikube | grep /var/lib/minikube │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/boot2docker | grep /var/lib/boot2docker │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/toolbox | grep /var/lib/toolbox │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/cni | grep /var/lib/cni │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/kubelet | grep /var/lib/kubelet │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh df -t ext4 /var/lib/docker | grep /var/lib/docker │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ ssh │ guest-929077 ssh test -f /sys/kernel/btf/vmlinux && echo 'OK' || echo 'NOT FOUND' │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ delete │ -p guest-929077 │ guest-929077 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
│ unpause │ -p default-k8s-diff-port-311562 --alsologtostderr -v=1 │ default-k8s-diff-port-311562 │ jenkins │ v1.37.0 │ 02 Nov 25 13:46 UTC │ 02 Nov 25 13:46 UTC │
└─────────┴───────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/11/02 13:45:27
Running on machine: ubuntu-20-agent-7
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1102 13:45:27.814942 52806 out.go:360] Setting OutFile to fd 1 ...
I1102 13:45:27.815219 52806 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1102 13:45:27.815230 52806 out.go:374] Setting ErrFile to fd 2...
I1102 13:45:27.815235 52806 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1102 13:45:27.815479 52806 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21808-9383/.minikube/bin
I1102 13:45:27.816019 52806 out.go:368] Setting JSON to false
I1102 13:45:27.816916 52806 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-7","uptime":5278,"bootTime":1762085850,"procs":202,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1043-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I1102 13:45:27.817007 52806 start.go:143] virtualization: kvm guest
I1102 13:45:27.819158 52806 out.go:179] * [embed-certs-705938] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I1102 13:45:27.820405 52806 notify.go:221] Checking for updates...
I1102 13:45:27.820434 52806 out.go:179] - MINIKUBE_LOCATION=21808
I1102 13:45:27.821972 52806 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1102 13:45:27.823419 52806 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:45:27.824677 52806 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21808-9383/.minikube
I1102 13:45:27.825909 52806 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I1102 13:45:27.827139 52806 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I1102 13:45:27.828747 52806 config.go:182] Loaded profile config "default-k8s-diff-port-311562": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:27.828850 52806 config.go:182] Loaded profile config "guest-929077": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v0.0.0
I1102 13:45:27.828938 52806 config.go:182] Loaded profile config "newest-cni-147975": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:27.829036 52806 config.go:182] Loaded profile config "no-preload-047294": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:27.829125 52806 driver.go:422] Setting default libvirt URI to qemu:///system
I1102 13:45:27.864969 52806 out.go:179] * Using the kvm2 driver based on user configuration
I1102 13:45:27.866282 52806 start.go:309] selected driver: kvm2
I1102 13:45:27.866299 52806 start.go:930] validating driver "kvm2" against <nil>
I1102 13:45:27.866311 52806 start.go:941] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1102 13:45:27.867320 52806 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I1102 13:45:27.867666 52806 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1102 13:45:27.867720 52806 cni.go:84] Creating CNI manager for ""
I1102 13:45:27.867784 52806 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:45:27.867797 52806 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I1102 13:45:27.867857 52806 start.go:353] cluster config:
{Name:embed-certs-705938 KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-705938 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1102 13:45:27.867993 52806 iso.go:125] acquiring lock: {Name:mk4c692b2fc885c991be3e19f361e45d770e6035 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1102 13:45:27.870465 52806 out.go:179] * Starting "embed-certs-705938" primary control-plane node in "embed-certs-705938" cluster
I1102 13:45:24.258413 52157 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1102 13:45:24.261942 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:24.262553 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:24.262596 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:24.262847 52157 ssh_runner.go:195] Run: grep 192.168.83.1 host.minikube.internal$ /etc/hosts
I1102 13:45:24.267803 52157 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.83.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:45:24.288892 52157 kubeadm.go:884] updating cluster {Name:default-k8s-diff-port-311562 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-311562 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.253 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1102 13:45:24.289170 52157 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1102 13:45:24.289255 52157 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:24.312158 52157 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1102 13:45:24.312186 52157 docker.go:621] Images already preloaded, skipping extraction
I1102 13:45:24.312243 52157 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:24.336173 52157 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1102 13:45:24.336205 52157 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:45:24.336218 52157 kubeadm.go:935] updating node { 192.168.83.253 8444 v1.34.1 docker true true} ...
I1102 13:45:24.336363 52157 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=default-k8s-diff-port-311562 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.83.253
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-311562 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1102 13:45:24.336444 52157 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
                                                I1102 13:45:24.403493 52157 cni.go:84] Creating CNI manager for ""
I1102 13:45:24.403547 52157 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:45:24.403566 52157 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1102 13:45:24.403600 52157 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.83.253 APIServerPort:8444 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:default-k8s-diff-port-311562 NodeName:default-k8s-diff-port-311562 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.83.253"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.83.253 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1102 13:45:24.403780 52157 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.83.253
  bindPort: 8444
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "default-k8s-diff-port-311562"
  kubeletExtraArgs:
  - name: "node-ip"
    value: "192.168.83.253"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.83.253"]
  extraArgs:
  - name: "enable-admission-plugins"
    value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
  - name: "allocate-node-cidrs"
    value: "true"
  - name: "leader-elect"
    value: "false"
scheduler:
  extraArgs:
  - name: "leader-elect"
    value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8444
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
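The rendered config above is the file that the later `kubeadm init phase ...` invocations in this log consume from /var/tmp/minikube/kubeadm.yaml. As a hedged aside (assuming the `kubeadm config validate` subcommand available in recent kubeadm releases, which is not invoked in this run), such a file can be sanity-checked in place with:

    sudo /var/lib/minikube/binaries/v1.34.1/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml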
I1102 13:45:24.403864 52157 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1102 13:45:24.419144 52157 binaries.go:44] Found k8s binaries, skipping transfer
I1102 13:45:24.419209 52157 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1102 13:45:24.433296 52157 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (329 bytes)
I1102 13:45:24.456694 52157 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1102 13:45:24.481139 52157 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2235 bytes)
I1102 13:45:24.508847 52157 ssh_runner.go:195] Run: grep 192.168.83.253 control-plane.minikube.internal$ /etc/hosts
I1102 13:45:24.513455 52157 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.83.253	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:45:24.539087 52157 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:24.723252 52157 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:45:24.745506 52157 certs.go:69] Setting up /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562 for IP: 192.168.83.253
I1102 13:45:24.745533 52157 certs.go:195] generating shared ca certs ...
I1102 13:45:24.745553 52157 certs.go:227] acquiring lock for ca certs: {Name:mk8ca472744959dc88f74e7c4ca834685146022e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:24.745749 52157 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key
I1102 13:45:24.745818 52157 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key
I1102 13:45:24.745830 52157 certs.go:257] generating profile certs ...
I1102 13:45:24.745960 52157 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/client.key
I1102 13:45:24.746049 52157 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/apiserver.key.ad150ec8
I1102 13:45:24.746119 52157 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/proxy-client.key
I1102 13:45:24.746278 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:45:24.746379 52157 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:45:24.746397 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:45:24.746438 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:45:24.746473 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:45:24.746506 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:45:24.746564 52157 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:24.747319 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1102 13:45:24.816279 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1102 13:45:24.864897 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1102 13:45:24.902332 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1102 13:45:24.935953 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1440 bytes)
I1102 13:45:24.990314 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1102 13:45:25.027803 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1102 13:45:25.074608 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/default-k8s-diff-port-311562/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1102 13:45:25.120662 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:45:25.166354 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:45:25.209580 52157 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:45:25.249272 52157 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1102 13:45:25.279712 52157 ssh_runner.go:195] Run: openssl version
I1102 13:45:25.287027 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:45:25.306535 52157 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:45:25.313675 52157 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:45:25.313807 52157 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:45:25.321843 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:45:25.338104 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:45:25.357307 52157 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:25.363326 52157 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:25.363444 52157 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:25.373914 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:45:25.386652 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:45:25.405642 52157 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:45:25.411303 52157 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:45:25.411402 52157 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:45:25.419049 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:45:25.431308 52157 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1102 13:45:25.438509 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1102 13:45:25.449100 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1102 13:45:25.456907 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1102 13:45:25.467179 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1102 13:45:25.475514 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1102 13:45:25.483363 52157 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
I1102 13:45:25.491312 52157 kubeadm.go:401] StartCluster: {Name:default-k8s-diff-port-311562 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-311562 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.253 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1102 13:45:25.491492 52157 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:45:25.517361 52157 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1102 13:45:25.535944 52157 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1102 13:45:25.535968 52157 kubeadm.go:598] restartPrimaryControlPlane start ...
I1102 13:45:25.536024 52157 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1102 13:45:25.552538 52157 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1102 13:45:25.553314 52157 kubeconfig.go:47] verify endpoint returned: get endpoint: "default-k8s-diff-port-311562" does not appear in /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:45:25.553708 52157 kubeconfig.go:62] /home/jenkins/minikube-integration/21808-9383/kubeconfig needs updating (will repair): [kubeconfig missing "default-k8s-diff-port-311562" cluster setting kubeconfig missing "default-k8s-diff-port-311562" context setting]
I1102 13:45:25.554366 52157 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:25.556177 52157 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1102 13:45:25.573392 52157 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.83.253
I1102 13:45:25.573435 52157 kubeadm.go:1161] stopping kube-system containers ...
I1102 13:45:25.573505 52157 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:45:25.604235 52157 docker.go:484] Stopping containers: [3edfba6f9a3b 70e642b4f37f 1c545a43f579 0bebcbbb6c23 859f8a49dca1 dfd9d91f746b 282b9f80c345 38a817203976 326385a03f87 3db15e0e0d1e 693e1d6c029a 3d076807ce98 9c5c22752c78 4f4d927b9b11 20a067500af0 efae7bca22ce]
I1102 13:45:25.604321 52157 ssh_runner.go:195] Run: docker stop 3edfba6f9a3b 70e642b4f37f 1c545a43f579 0bebcbbb6c23 859f8a49dca1 dfd9d91f746b 282b9f80c345 38a817203976 326385a03f87 3db15e0e0d1e 693e1d6c029a 3d076807ce98 9c5c22752c78 4f4d927b9b11 20a067500af0 efae7bca22ce
I1102 13:45:25.644437 52157 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1102 13:45:25.687720 52157 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1102 13:45:25.701262 52157 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1102 13:45:25.701289 52157 kubeadm.go:158] found existing configuration files:
I1102 13:45:25.701352 52157 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/admin.conf
I1102 13:45:25.712652 52157 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1102 13:45:25.712707 52157 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1102 13:45:25.724832 52157 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/kubelet.conf
I1102 13:45:25.738016 52157 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1102 13:45:25.738088 52157 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1102 13:45:25.750148 52157 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/controller-manager.conf
I1102 13:45:25.761179 52157 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1102 13:45:25.761269 52157 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1102 13:45:25.779488 52157 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/scheduler.conf
I1102 13:45:25.793287 52157 kubeadm.go:164] "https://control-plane.minikube.internal:8444" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8444 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1102 13:45:25.793368 52157 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1102 13:45:25.810499 52157 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1102 13:45:25.822902 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:25.983948 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:27.057902 52157 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.073916863s)
I1102 13:45:27.057967 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:27.350358 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:27.448927 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:27.557150 52157 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:45:27.557235 52157 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:28.057474 52157 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:28.557655 52157 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:29.058130 52157 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:31.478717 52361 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.61.81:22: connect: no route to host
I1102 13:45:27.871749 52806 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1102 13:45:27.871787 52806 preload.go:198] Found local preload: /home/jenkins/minikube-integration/21808-9383/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4
I1102 13:45:27.871794 52806 cache.go:59] Caching tarball of preloaded images
I1102 13:45:27.871901 52806 preload.go:233] Found /home/jenkins/minikube-integration/21808-9383/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I1102 13:45:27.871914 52806 cache.go:62] Finished verifying existence of preloaded tar for v1.34.1 on docker
I1102 13:45:27.871999 52806 profile.go:143] Saving config to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/config.json ...
I1102 13:45:27.872018 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/config.json: {Name:mk922c147409f94b9bb8a612e552b8bcbdc6c60d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:27.872161 52806 start.go:360] acquireMachinesLock for embed-certs-705938: {Name:mkb7e4680c5870b574bd51b6ea3b2b307ff3694b Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I1102 13:45:29.102026 52157 api_server.go:72] duration metric: took 1.544885698s to wait for apiserver process to appear ...
I1102 13:45:29.102065 52157 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:45:29.102095 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:29.103198 52157 api_server.go:269] stopped: https://192.168.83.253:8444/healthz: Get "https://192.168.83.253:8444/healthz": dial tcp 192.168.83.253:8444: connect: connection refused
I1102 13:45:29.603036 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:32.333055 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                W1102 13:45:32.333090 52157 api_server.go:103] status: https://192.168.83.253:8444/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                I1102 13:45:32.333114 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:32.388238 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                W1102 13:45:32.388269 52157 api_server.go:103] status: https://192.168.83.253:8444/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                I1102 13:45:32.602829 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:32.625545 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:32.625582 52157 api_server.go:103] status: https://192.168.83.253:8444/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
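The verbose dumps above come from minikube polling the apiserver's /healthz endpoint while the two [-] poststarthooks (rbac/bootstrap-roles and scheduling/bootstrap-system-priority-classes) are still settling. A hedged way to run the same verbose check by hand against this cluster, using standard curl flags (-k skips verification of the minikubeCA-signed serving cert; not a command from the original run):

    curl -k 'https://192.168.83.253:8444/healthz?verbose'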
I1102 13:45:33.102243 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:33.118492 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:33.118527 52157 api_server.go:103] status: https://192.168.83.253:8444/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:33.602919 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:33.626727 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:33.626754 52157 api_server.go:103] status: https://192.168.83.253:8444/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:34.102634 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:34.108370 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 200:
ok
I1102 13:45:34.116358 52157 api_server.go:141] control plane version: v1.34.1
I1102 13:45:34.116386 52157 api_server.go:131] duration metric: took 5.014312473s to wait for apiserver health ...
I1102 13:45:34.116396 52157 cni.go:84] Creating CNI manager for ""
I1102 13:45:34.116406 52157 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:45:34.117986 52157 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1102 13:45:34.119367 52157 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1102 13:45:34.143868 52157 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
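The scp line above is minikube writing the bridge CNI configuration it announced just before. A hypothetical way to inspect the exact bytes that landed on the node, assuming this run's profile name (the file path is taken from the log):

    out/minikube-linux-amd64 ssh -p default-k8s-diff-port-311562 -- sudo cat /etc/cni/net.d/1-k8s.conflist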
I1102 13:45:34.171893 52157 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:45:34.177791 52157 system_pods.go:59] 8 kube-system pods found
I1102 13:45:34.177865 52157 system_pods.go:61] "coredns-66bc5c9577-bnv4n" [111da945-5109-4be5-9c67-f48cdaed8cbe] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:34.177879 52157 system_pods.go:61] "etcd-default-k8s-diff-port-311562" [0d2ea3b5-719d-42ed-b50e-bea33102fbd2] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:45:34.177892 52157 system_pods.go:61] "kube-apiserver-default-k8s-diff-port-311562" [3ab30a10-b48f-4807-b701-4ed47eb1dec1] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:45:34.177905 52157 system_pods.go:61] "kube-controller-manager-default-k8s-diff-port-311562" [7b9e15c3-e059-4867-832e-5d67b1eff8f8] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:45:34.177919 52157 system_pods.go:61] "kube-proxy-5qv84" [11842593-1fe8-476c-a692-ecdecf44fafa] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1102 13:45:34.177927 52157 system_pods.go:61] "kube-scheduler-default-k8s-diff-port-311562" [ad4bb6b8-cae7-4cfc-8f3f-3779226708e6] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:45:34.177946 52157 system_pods.go:61] "metrics-server-746fcd58dc-tcttv" [e9fc9174-d97e-4486-a4da-a405ebd4a7f3] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:45:34.177960 52157 system_pods.go:61] "storage-provisioner" [ce313798-c158-4174-aec9-8d1e48caceea] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1102 13:45:34.177972 52157 system_pods.go:74] duration metric: took 6.048605ms to wait for pod list to return data ...
I1102 13:45:34.177982 52157 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:45:34.181850 52157 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:45:34.181899 52157 node_conditions.go:123] node cpu capacity is 2
I1102 13:45:34.181917 52157 node_conditions.go:105] duration metric: took 3.925526ms to run NodePressure ...
I1102 13:45:34.181986 52157 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:34.570876 52157 kubeadm.go:729] waiting for restarted kubelet to initialise ...
I1102 13:45:34.574447 52157 kubeadm.go:744] kubelet initialised
I1102 13:45:34.574472 52157 kubeadm.go:745] duration metric: took 3.566201ms waiting for restarted kubelet to initialise ...
I1102 13:45:34.574488 52157 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1102 13:45:34.592867 52157 ops.go:34] apiserver oom_adj: -16
I1102 13:45:34.592894 52157 kubeadm.go:602] duration metric: took 9.056917288s to restartPrimaryControlPlane
I1102 13:45:34.592906 52157 kubeadm.go:403] duration metric: took 9.101604844s to StartCluster
I1102 13:45:34.592928 52157 settings.go:142] acquiring lock: {Name:mk2d74ff80d6e54b2738086ad41016418abd2f10 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:34.593024 52157 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:45:34.593729 52157 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:34.593994 52157 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.83.253 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1102 13:45:34.594093 52157 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1102 13:45:34.594205 52157 config.go:182] Loaded profile config "default-k8s-diff-port-311562": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:34.594219 52157 addons.go:70] Setting metrics-server=true in profile "default-k8s-diff-port-311562"
I1102 13:45:34.594236 52157 addons.go:239] Setting addon metrics-server=true in "default-k8s-diff-port-311562"
I1102 13:45:34.594244 52157 addons.go:70] Setting default-storageclass=true in profile "default-k8s-diff-port-311562"
W1102 13:45:34.594254 52157 addons.go:248] addon metrics-server should already be in state true
I1102 13:45:34.594206 52157 addons.go:70] Setting storage-provisioner=true in profile "default-k8s-diff-port-311562"
I1102 13:45:34.594270 52157 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "default-k8s-diff-port-311562"
I1102 13:45:34.594286 52157 host.go:66] Checking if "default-k8s-diff-port-311562" exists ...
I1102 13:45:34.594286 52157 cache.go:107] acquiring lock: {Name:mkfde24ce23f92e3eaf637254ed5ac4355c07159 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1102 13:45:34.594296 52157 addons.go:239] Setting addon storage-provisioner=true in "default-k8s-diff-port-311562"
W1102 13:45:34.594326 52157 addons.go:248] addon storage-provisioner should already be in state true
I1102 13:45:34.594260 52157 addons.go:70] Setting dashboard=true in profile "default-k8s-diff-port-311562"
I1102 13:45:34.594370 52157 cache.go:115] /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 exists
I1102 13:45:34.594371 52157 host.go:66] Checking if "default-k8s-diff-port-311562" exists ...
I1102 13:45:34.594373 52157 addons.go:239] Setting addon dashboard=true in "default-k8s-diff-port-311562"
W1102 13:45:34.594385 52157 addons.go:248] addon dashboard should already be in state true
I1102 13:45:34.594382 52157 cache.go:96] cache image "gcr.io/k8s-minikube/gvisor-addon:2" -> "/home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2" took 120.497µs
I1102 13:45:34.594393 52157 cache.go:80] save to tar file gcr.io/k8s-minikube/gvisor-addon:2 -> /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 succeeded
I1102 13:45:34.594410 52157 host.go:66] Checking if "default-k8s-diff-port-311562" exists ...
I1102 13:45:34.594401 52157 cache.go:87] Successfully saved all images to host disk.
I1102 13:45:34.594626 52157 config.go:182] Loaded profile config "default-k8s-diff-port-311562": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:34.598052 52157 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:34.598276 52157 addons.go:239] Setting addon default-storageclass=true in "default-k8s-diff-port-311562"
W1102 13:45:34.598295 52157 addons.go:248] addon default-storageclass should already be in state true
I1102 13:45:34.598320 52157 host.go:66] Checking if "default-k8s-diff-port-311562" exists ...
I1102 13:45:34.599734 52157 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1102 13:45:34.599742 52157 out.go:179] * Verifying Kubernetes components...
I1102 13:45:34.599762 52157 out.go:179] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I1102 13:45:34.599742 52157 out.go:179] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I1102 13:45:34.600285 52157 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1102 13:45:34.600302 52157 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
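The 271-byte storageclass.yaml copied here is likewise not dumped. A sketch of the kind of manifest the default-storageclass addon applies (reconstructed from memory; treat the exact fields as assumptions):
# sketch only: default StorageClass backed by the minikube hostpath provisioner
sudo tee /etc/kubernetes/addons/storageclass.yaml <<'EOF'
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
provisioner: k8s.io/minikube-hostpath
EOF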
I1102 13:45:34.601511 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.601857 52157 addons.go:436] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1102 13:45:34.601895 52157 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1102 13:45:34.601934 52157 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:45:34.601948 52157 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1102 13:45:34.601890 52157 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:34.602246 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:34.602282 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.602739 52157 sshutil.go:53] new ssh client: &{IP:192.168.83.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/default-k8s-diff-port-311562/id_rsa Username:docker}
I1102 13:45:34.603296 52157 out.go:179] - Using image registry.k8s.io/echoserver:1.4
I1102 13:45:34.604120 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.604551 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-ns.yaml
I1102 13:45:34.604571 52157 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I1102 13:45:34.604861 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:34.604901 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.605228 52157 sshutil.go:53] new ssh client: &{IP:192.168.83.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/default-k8s-diff-port-311562/id_rsa Username:docker}
I1102 13:45:34.606525 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.606530 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.607058 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:34.607090 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.607167 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:34.607201 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.607285 52157 sshutil.go:53] new ssh client: &{IP:192.168.83.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/default-k8s-diff-port-311562/id_rsa Username:docker}
I1102 13:45:34.607545 52157 sshutil.go:53] new ssh client: &{IP:192.168.83.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/default-k8s-diff-port-311562/id_rsa Username:docker}
I1102 13:45:34.608392 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.608859 52157 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:7e:57:ae", ip: ""} in network mk-default-k8s-diff-port-311562: {Iface:virbr5 ExpiryTime:2025-11-02 14:45:16 +0000 UTC Type:0 Mac:52:54:00:7e:57:ae Iaid: IPaddr:192.168.83.253 Prefix:24 Hostname:default-k8s-diff-port-311562 Clientid:01:52:54:00:7e:57:ae}
I1102 13:45:34.608896 52157 main.go:143] libmachine: domain default-k8s-diff-port-311562 has defined IP address 192.168.83.253 and MAC address 52:54:00:7e:57:ae in network mk-default-k8s-diff-port-311562
I1102 13:45:34.609076 52157 sshutil.go:53] new ssh client: &{IP:192.168.83.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/default-k8s-diff-port-311562/id_rsa Username:docker}
I1102 13:45:34.876033 52157 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:45:34.907535 52157 node_ready.go:35] waiting up to 6m0s for node "default-k8s-diff-port-311562" to be "Ready" ...
I1102 13:45:34.937033 52157 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1102 13:45:34.937057 52157 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:45:34.937065 52157 cache_images.go:264] succeeded pushing to: default-k8s-diff-port-311562
I1102 13:45:34.984071 52157 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1102 13:45:34.987387 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I1102 13:45:34.987406 52157 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I1102 13:45:34.998108 52157 addons.go:436] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1102 13:45:34.998131 52157 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I1102 13:45:35.001421 52157 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:45:35.058884 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I1102 13:45:35.058914 52157 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I1102 13:45:35.065034 52157 addons.go:436] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1102 13:45:35.065068 52157 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1102 13:45:35.086869 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I1102 13:45:35.086900 52157 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I1102 13:45:35.125203 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-dp.yaml
I1102 13:45:35.125231 52157 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
I1102 13:45:35.127236 52157 addons.go:436] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:45:35.127260 52157 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1102 13:45:35.168655 52157 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:45:35.200958 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-role.yaml
I1102 13:45:35.200990 52157 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I1102 13:45:35.253959 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I1102 13:45:35.253989 52157 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I1102 13:45:35.373065 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-sa.yaml
I1102 13:45:35.373093 52157 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I1102 13:45:35.451915 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-secret.yaml
I1102 13:45:35.451951 52157 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I1102 13:45:35.522842 52157 addons.go:436] installing /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:45:35.522875 52157 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I1102 13:45:35.583283 52157 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:45:36.793258 52157 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.791801585s)
I1102 13:45:36.793434 52157 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (1.624737745s)
I1102 13:45:36.793466 52157 addons.go:480] Verifying addon metrics-server=true in "default-k8s-diff-port-311562"
W1102 13:45:36.940528 52157 node_ready.go:57] node "default-k8s-diff-port-311562" has "Ready":"False" status (will retry)
I1102 13:45:37.026166 52157 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (1.442836828s)
I1102 13:45:37.027775 52157 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p default-k8s-diff-port-311562 addons enable metrics-server
I1102 13:45:37.029478 52157 out.go:179] * Enabled addons: default-storageclass, storage-provisioner, metrics-server, dashboard
I1102 13:45:34.481026 52361 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.61.81:22: connect: connection refused
I1102 13:45:37.030957 52157 addons.go:515] duration metric: took 2.436866044s for enable addons: enabled=[default-storageclass storage-provisioner metrics-server dashboard]
I1102 13:45:39.865356 52370 start.go:364] duration metric: took 32.34908828s to acquireMachinesLock for "no-preload-047294"
I1102 13:45:39.865424 52370 start.go:96] Skipping create...Using existing machine configuration
I1102 13:45:39.865433 52370 fix.go:54] fixHost starting:
I1102 13:45:39.867915 52370 fix.go:112] recreateIfNeeded on no-preload-047294: state=Stopped err=<nil>
W1102 13:45:39.867946 52370 fix.go:138] unexpected machine state, will restart: <nil>
I1102 13:45:37.581415 52361 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:45:37.584895 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.585320 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.585354 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.585574 52361 profile.go:143] Saving config to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/config.json ...
I1102 13:45:37.585807 52361 machine.go:94] provisionDockerMachine start ...
I1102 13:45:37.588182 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.588520 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.588546 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.588703 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:37.588927 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:37.588938 52361 main.go:143] libmachine: About to run SSH command:
hostname
I1102 13:45:37.694418 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1102 13:45:37.694464 52361 buildroot.go:166] provisioning hostname "newest-cni-147975"
I1102 13:45:37.697594 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.698064 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.698103 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.698302 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:37.698584 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:37.698601 52361 main.go:143] libmachine: About to run SSH command:
sudo hostname newest-cni-147975 && echo "newest-cni-147975" | sudo tee /etc/hostname
I1102 13:45:37.818073 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: newest-cni-147975
I1102 13:45:37.821034 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.821427 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.821465 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.821608 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:37.821800 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:37.821816 52361 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\snewest-cni-147975' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 newest-cni-147975/g' /etc/hosts;
else
echo '127.0.1.1 newest-cni-147975' | sudo tee -a /etc/hosts;
fi
fi
I1102 13:45:37.929633 52361 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:45:37.929665 52361 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21808-9383/.minikube CaCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21808-9383/.minikube}
I1102 13:45:37.929706 52361 buildroot.go:174] setting up certificates
I1102 13:45:37.929719 52361 provision.go:84] configureAuth start
I1102 13:45:37.932700 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.933090 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.933122 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.935216 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.935541 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:37.935565 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:37.935692 52361 provision.go:143] copyHostCerts
I1102 13:45:37.935744 52361 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem, removing ...
I1102 13:45:37.935776 52361 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem
I1102 13:45:37.935850 52361 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem (1082 bytes)
I1102 13:45:37.935958 52361 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem, removing ...
I1102 13:45:37.935969 52361 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem
I1102 13:45:37.936002 52361 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem (1123 bytes)
I1102 13:45:37.936085 52361 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem, removing ...
I1102 13:45:37.936094 52361 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem
I1102 13:45:37.936135 52361 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem (1675 bytes)
I1102 13:45:37.936203 52361 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem org=jenkins.newest-cni-147975 san=[127.0.0.1 192.168.61.81 localhost minikube newest-cni-147975]
I1102 13:45:38.155299 52361 provision.go:177] copyRemoteCerts
I1102 13:45:38.155382 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1102 13:45:38.158216 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.158589 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:38.158618 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.158749 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:38.239867 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1102 13:45:38.270160 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1102 13:45:38.299806 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1102 13:45:38.335353 52361 provision.go:87] duration metric: took 405.605042ms to configureAuth
I1102 13:45:38.335388 52361 buildroot.go:189] setting minikube options for container-runtime
I1102 13:45:38.335592 52361 config.go:182] Loaded profile config "newest-cni-147975": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:38.338168 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.338550 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:38.338571 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.338739 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:38.338923 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:38.338933 52361 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1102 13:45:38.438323 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1102 13:45:38.438372 52361 buildroot.go:70] root file system type: tmpfs
I1102 13:45:38.438525 52361 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1102 13:45:38.441568 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.442033 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:38.442065 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.442251 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:38.442490 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:38.442545 52361 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1102 13:45:38.558405 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1102 13:45:38.561299 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.561670 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:38.561693 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:38.561907 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:38.562125 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:38.562140 52361 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1102 13:45:39.604281 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1102 13:45:39.604317 52361 machine.go:97] duration metric: took 2.018495868s to provisionDockerMachine
I1102 13:45:39.604334 52361 start.go:293] postStartSetup for "newest-cni-147975" (driver="kvm2")
I1102 13:45:39.604373 52361 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1102 13:45:39.604448 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1102 13:45:39.608114 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.608712 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:39.608752 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.608967 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:39.692467 52361 ssh_runner.go:195] Run: cat /etc/os-release
I1102 13:45:39.697397 52361 info.go:137] Remote host: Buildroot 2025.02
I1102 13:45:39.697432 52361 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/addons for local assets ...
I1102 13:45:39.697520 52361 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/files for local assets ...
I1102 13:45:39.697622 52361 filesync.go:149] local asset: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem -> 132702.pem in /etc/ssl/certs
I1102 13:45:39.697739 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1102 13:45:39.710993 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:39.750212 52361 start.go:296] duration metric: took 145.83601ms for postStartSetup
I1102 13:45:39.750248 52361 fix.go:56] duration metric: took 18.890794371s for fixHost
I1102 13:45:39.753417 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.753758 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:39.753780 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.753984 52361 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:39.754184 52361 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.61.81 22 <nil> <nil>}
I1102 13:45:39.754195 52361 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1102 13:45:39.865055 52361 main.go:143] libmachine: SSH cmd err, output: <nil>: 1762091139.840815559
I1102 13:45:39.865088 52361 fix.go:216] guest clock: 1762091139.840815559
I1102 13:45:39.865100 52361 fix.go:229] Guest: 2025-11-02 13:45:39.840815559 +0000 UTC Remote: 2025-11-02 13:45:39.750251978 +0000 UTC m=+32.410617405 (delta=90.563581ms)
I1102 13:45:39.865202 52361 fix.go:200] guest clock delta is within tolerance: 90.563581ms
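For the record, the delta reported at fix.go:229 is just guest minus remote: 1762091139.840815559 - 1762091139.750251978 = 0.090563581 s, i.e. the 90.563581ms shown, which the fix.go:200 line above confirms is within minikube's clock-skew tolerance.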
I1102 13:45:39.865221 52361 start.go:83] releasing machines lock for "newest-cni-147975", held for 19.005792633s
I1102 13:45:39.869172 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.869788 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:39.869817 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.870311 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:45:39.870370 52361 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:45:39.870386 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:45:39.870423 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:45:39.870456 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:45:39.870489 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:45:39.870554 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:39.870644 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:45:39.873860 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.874287 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:39.874308 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:39.874488 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:39.993846 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:45:40.033517 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:45:40.071252 52361 ssh_runner.go:195] Run: openssl version
I1102 13:45:40.078368 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:45:40.097162 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:45:40.103935 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:45:40.104003 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:45:40.113374 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:45:40.127502 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:45:40.144554 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:40.150102 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:40.150187 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:40.157847 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:45:40.171118 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:45:40.185519 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:45:40.190944 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:45:40.191025 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:45:40.198479 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:45:40.211854 52361 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-certificates >/dev/null 2>&1 && sudo update-ca-certificates || true"
I1102 13:45:40.216528 52361 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-trust >/dev/null 2>&1 && sudo update-ca-trust extract || true"
I1102 13:45:40.222954 52361 ssh_runner.go:195] Run: cat /version.json
I1102 13:45:40.223040 52361 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1102 13:45:40.255994 52361 ssh_runner.go:195] Run: systemctl --version
I1102 13:45:40.262413 52361 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1102 13:45:40.268788 52361 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1102 13:45:40.268851 52361 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1102 13:45:40.290406 52361 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1102 13:45:40.290442 52361 start.go:496] detecting cgroup driver to use...
I1102 13:45:40.290591 52361 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:45:40.314229 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1102 13:45:40.329568 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1102 13:45:40.342632 52361 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1102 13:45:40.342731 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1102 13:45:40.356788 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:45:40.372447 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1102 13:45:40.387923 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:45:40.401408 52361 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1102 13:45:40.417560 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1102 13:45:40.432329 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1102 13:45:40.445927 52361 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1102 13:45:40.458528 52361 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1102 13:45:40.470044 52361 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1102 13:45:40.470120 52361 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1102 13:45:40.486640 52361 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1102 13:45:40.501035 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:40.657081 52361 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1102 13:45:40.702685 52361 start.go:496] detecting cgroup driver to use...
I1102 13:45:40.702800 52361 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1102 13:45:40.727172 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:45:40.748964 52361 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1102 13:45:40.770268 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:45:40.788631 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:45:40.808214 52361 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1102 13:45:40.845782 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:45:40.863375 52361 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:45:40.889520 52361 ssh_runner.go:195] Run: which cri-dockerd
I1102 13:45:40.893858 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1102 13:45:40.906355 52361 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1102 13:45:40.932238 52361 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1102 13:45:41.114491 52361 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1102 13:45:41.329446 52361 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1102 13:45:41.329575 52361 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1102 13:45:41.359524 52361 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1102 13:45:41.381666 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:41.587399 52361 ssh_runner.go:195] Run: sudo systemctl restart docker
I1102 13:45:42.201139 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1102 13:45:42.219085 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1102 13:45:42.237760 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:45:42.256754 52361 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1102 13:45:39.869851 52370 out.go:252] * Restarting existing kvm2 VM for "no-preload-047294" ...
I1102 13:45:39.869901 52370 main.go:143] libmachine: starting domain...
I1102 13:45:39.869917 52370 main.go:143] libmachine: ensuring networks are active...
I1102 13:45:39.871270 52370 main.go:143] libmachine: Ensuring network default is active
I1102 13:45:39.871811 52370 main.go:143] libmachine: Ensuring network mk-no-preload-047294 is active
I1102 13:45:39.872613 52370 main.go:143] libmachine: getting domain XML...
I1102 13:45:39.873800 52370 main.go:143] libmachine: starting domain XML:
<domain type='kvm'>
<name>no-preload-047294</name>
<uuid>b0e1e20f-05fd-4dfa-b87d-3577e480609e</uuid>
<memory unit='KiB'>3145728</memory>
<currentMemory unit='KiB'>3145728</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='2'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' io='threads'/>
<source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/no-preload-047294.rawdisk'/>
<target dev='hda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</controller>
<interface type='network'>
<mac address='52:54:00:26:8d:fe'/>
<source network='mk-no-preload-047294'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:f1:04:54'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<audio id='1' type='none'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</rng>
</devices>
</domain>
I1102 13:45:41.294503 52370 main.go:143] libmachine: waiting for domain to start...
I1102 13:45:41.296065 52370 main.go:143] libmachine: domain is now running
I1102 13:45:41.296088 52370 main.go:143] libmachine: waiting for IP...
I1102 13:45:41.297084 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:41.297908 52370 main.go:143] libmachine: domain no-preload-047294 has current primary IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:41.297923 52370 main.go:143] libmachine: found domain IP: 192.168.72.63
I1102 13:45:41.297928 52370 main.go:143] libmachine: reserving static IP address...
I1102 13:45:41.298428 52370 main.go:143] libmachine: found host DHCP lease matching {name: "no-preload-047294", mac: "52:54:00:26:8d:fe", ip: "192.168.72.63"} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:43:15 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:41.298467 52370 main.go:143] libmachine: skip adding static IP to network mk-no-preload-047294 - found existing host DHCP lease matching {name: "no-preload-047294", mac: "52:54:00:26:8d:fe", ip: "192.168.72.63"}
I1102 13:45:41.298477 52370 main.go:143] libmachine: reserved static IP address 192.168.72.63 for domain no-preload-047294
I1102 13:45:41.298486 52370 main.go:143] libmachine: waiting for SSH...
I1102 13:45:41.298494 52370 main.go:143] libmachine: Getting to WaitForSSH function...
I1102 13:45:41.301093 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:41.301540 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:43:15 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:41.301570 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:41.301922 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:41.302191 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:41.302205 52370 main.go:143] libmachine: About to run SSH command:
exit 0
I1102 13:45:39.410932 52157 node_ready.go:49] node "default-k8s-diff-port-311562" is "Ready"
I1102 13:45:39.410972 52157 node_ready.go:38] duration metric: took 4.503397384s for node "default-k8s-diff-port-311562" to be "Ready" ...
I1102 13:45:39.410995 52157 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:45:39.411072 52157 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:39.432862 52157 api_server.go:72] duration metric: took 4.838818522s to wait for apiserver process to appear ...
I1102 13:45:39.432889 52157 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:45:39.432922 52157 api_server.go:253] Checking apiserver healthz at https://192.168.83.253:8444/healthz ...
I1102 13:45:39.441678 52157 api_server.go:279] https://192.168.83.253:8444/healthz returned 200:
ok
I1102 13:45:39.443725 52157 api_server.go:141] control plane version: v1.34.1
I1102 13:45:39.443750 52157 api_server.go:131] duration metric: took 10.854353ms to wait for apiserver health ...
I1102 13:45:39.443761 52157 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:45:39.450979 52157 system_pods.go:59] 8 kube-system pods found
I1102 13:45:39.451013 52157 system_pods.go:61] "coredns-66bc5c9577-bnv4n" [111da945-5109-4be5-9c67-f48cdaed8cbe] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:39.451024 52157 system_pods.go:61] "etcd-default-k8s-diff-port-311562" [0d2ea3b5-719d-42ed-b50e-bea33102fbd2] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:45:39.451035 52157 system_pods.go:61] "kube-apiserver-default-k8s-diff-port-311562" [3ab30a10-b48f-4807-b701-4ed47eb1dec1] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:45:39.451043 52157 system_pods.go:61] "kube-controller-manager-default-k8s-diff-port-311562" [7b9e15c3-e059-4867-832e-5d67b1eff8f8] Running
I1102 13:45:39.451052 52157 system_pods.go:61] "kube-proxy-5qv84" [11842593-1fe8-476c-a692-ecdecf44fafa] Running
I1102 13:45:39.451065 52157 system_pods.go:61] "kube-scheduler-default-k8s-diff-port-311562" [ad4bb6b8-cae7-4cfc-8f3f-3779226708e6] Running
I1102 13:45:39.451074 52157 system_pods.go:61] "metrics-server-746fcd58dc-tcttv" [e9fc9174-d97e-4486-a4da-a405ebd4a7f3] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:45:39.451083 52157 system_pods.go:61] "storage-provisioner" [ce313798-c158-4174-aec9-8d1e48caceea] Running
I1102 13:45:39.451092 52157 system_pods.go:74] duration metric: took 7.317334ms to wait for pod list to return data ...
I1102 13:45:39.451103 52157 default_sa.go:34] waiting for default service account to be created ...
I1102 13:45:39.456302 52157 default_sa.go:45] found service account: "default"
I1102 13:45:39.456332 52157 default_sa.go:55] duration metric: took 5.220853ms for default service account to be created ...
I1102 13:45:39.456362 52157 system_pods.go:116] waiting for k8s-apps to be running ...
I1102 13:45:39.461632 52157 system_pods.go:86] 8 kube-system pods found
I1102 13:45:39.461674 52157 system_pods.go:89] "coredns-66bc5c9577-bnv4n" [111da945-5109-4be5-9c67-f48cdaed8cbe] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:39.461689 52157 system_pods.go:89] "etcd-default-k8s-diff-port-311562" [0d2ea3b5-719d-42ed-b50e-bea33102fbd2] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:45:39.461701 52157 system_pods.go:89] "kube-apiserver-default-k8s-diff-port-311562" [3ab30a10-b48f-4807-b701-4ed47eb1dec1] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:45:39.461714 52157 system_pods.go:89] "kube-controller-manager-default-k8s-diff-port-311562" [7b9e15c3-e059-4867-832e-5d67b1eff8f8] Running
I1102 13:45:39.461721 52157 system_pods.go:89] "kube-proxy-5qv84" [11842593-1fe8-476c-a692-ecdecf44fafa] Running
I1102 13:45:39.461726 52157 system_pods.go:89] "kube-scheduler-default-k8s-diff-port-311562" [ad4bb6b8-cae7-4cfc-8f3f-3779226708e6] Running
I1102 13:45:39.461734 52157 system_pods.go:89] "metrics-server-746fcd58dc-tcttv" [e9fc9174-d97e-4486-a4da-a405ebd4a7f3] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:45:39.461743 52157 system_pods.go:89] "storage-provisioner" [ce313798-c158-4174-aec9-8d1e48caceea] Running
I1102 13:45:39.461754 52157 system_pods.go:126] duration metric: took 5.382647ms to wait for k8s-apps to be running ...
I1102 13:45:39.461767 52157 system_svc.go:44] waiting for kubelet service to be running ....
I1102 13:45:39.461815 52157 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1102 13:45:39.514748 52157 system_svc.go:56] duration metric: took 52.972619ms WaitForService to wait for kubelet
I1102 13:45:39.514779 52157 kubeadm.go:587] duration metric: took 4.920739305s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1102 13:45:39.514798 52157 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:45:39.521119 52157 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:45:39.521150 52157 node_conditions.go:123] node cpu capacity is 2
I1102 13:45:39.521163 52157 node_conditions.go:105] duration metric: took 6.360208ms to run NodePressure ...
I1102 13:45:39.521177 52157 start.go:242] waiting for startup goroutines ...
I1102 13:45:39.521187 52157 start.go:247] waiting for cluster config update ...
I1102 13:45:39.521201 52157 start.go:256] writing updated cluster config ...
I1102 13:45:39.521537 52157 ssh_runner.go:195] Run: rm -f paused
I1102 13:45:39.528712 52157 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1102 13:45:39.534448 52157 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-bnv4n" in "kube-system" namespace to be "Ready" or be gone ...
W1102 13:45:41.545277 52157 pod_ready.go:104] pod "coredns-66bc5c9577-bnv4n" is not "Ready", error: <nil>
W1102 13:45:44.042820 52157 pod_ready.go:104] pod "coredns-66bc5c9577-bnv4n" is not "Ready", error: <nil>
I1102 13:45:42.440572 52361 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1102 13:45:42.638515 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:42.794631 52361 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1102 13:45:42.837723 52361 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:45:42.859277 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:43.058705 52361 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:45:43.093206 52361 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1102 13:45:43.113952 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:45:43.135431 52361 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:45:43.156617 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:43.330285 52361 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:45:43.351880 52361 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1102 13:45:43.372910 52361 retry.go:31] will retry after 905.68851ms: cri-docker.service not running
I1102 13:45:44.279050 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:45:44.296654 52361 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:45:44.312440 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:44.470640 52361 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:45:44.600633 52361 retry.go:31] will retry after 2.029848811s: cri-docker.service not running
I1102 13:45:46.630725 52361 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:45:46.648951 52361 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1102 13:45:46.649026 52361 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1102 13:45:46.655597 52361 start.go:564] Will wait 60s for crictl version
I1102 13:45:46.655661 52361 ssh_runner.go:195] Run: which crictl
I1102 13:45:46.660129 52361 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1102 13:45:46.705138 52361 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1102 13:45:46.705214 52361 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1102 13:45:46.744452 52361 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1102 13:45:46.780423 52361 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1102 13:45:46.783916 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:46.784424 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:46.784454 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:46.784707 52361 ssh_runner.go:195] Run: grep 192.168.61.1 host.minikube.internal$ /etc/hosts
I1102 13:45:46.791049 52361 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.61.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:45:46.815943 52361 out.go:179] - kubeadm.pod-network-cidr=10.42.0.0/16
I1102 13:45:46.817335 52361 kubeadm.go:884] updating cluster {Name:newest-cni-147975 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-147975 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.61.81 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1102 13:45:46.817504 52361 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1102 13:45:46.817583 52361 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:46.842576 52361 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1102 13:45:46.842612 52361 docker.go:621] Images already preloaded, skipping extraction
I1102 13:45:46.842678 52361 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:46.872207 52361 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1102 13:45:46.872231 52361 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:45:46.872240 52361 kubeadm.go:935] updating node { 192.168.61.81 8443 v1.34.1 docker true true} ...
I1102 13:45:46.872396 52361 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=newest-cni-147975 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.61.81
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:newest-cni-147975 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1102 13:45:46.872497 52361 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1102 13:45:46.939383 52361 cni.go:84] Creating CNI manager for ""
I1102 13:45:46.939435 52361 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:45:46.939451 52361 kubeadm.go:85] Using pod CIDR: 10.42.0.0/16
I1102 13:45:46.939480 52361 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.42.0.0/16 AdvertiseAddress:192.168.61.81 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:newest-cni-147975 NodeName:newest-cni-147975 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.61.81"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.61.81 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1102 13:45:46.939644 52361 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.61.81
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "newest-cni-147975"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.61.81"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.61.81"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.42.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.42.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1102 13:45:46.939715 52361 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1102 13:45:46.952774 52361 binaries.go:44] Found k8s binaries, skipping transfer
I1102 13:45:46.952868 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1102 13:45:46.965101 52361 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I1102 13:45:46.992105 52361 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1102 13:45:47.018046 52361 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2219 bytes)
I1102 13:45:47.044224 52361 ssh_runner.go:195] Run: grep 192.168.61.81 control-plane.minikube.internal$ /etc/hosts
I1102 13:45:47.049806 52361 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.61.81	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:45:47.065913 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:47.211899 52361 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:45:47.246223 52361 certs.go:69] Setting up /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975 for IP: 192.168.61.81
I1102 13:45:47.246248 52361 certs.go:195] generating shared ca certs ...
I1102 13:45:47.246269 52361 certs.go:227] acquiring lock for ca certs: {Name:mk8ca472744959dc88f74e7c4ca834685146022e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:47.246458 52361 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key
I1102 13:45:47.246525 52361 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key
I1102 13:45:47.246536 52361 certs.go:257] generating profile certs ...
I1102 13:45:47.246639 52361 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/client.key
I1102 13:45:47.246728 52361 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/apiserver.key.6dcf010f
I1102 13:45:47.246797 52361 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/proxy-client.key
I1102 13:45:47.246938 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:45:47.246973 52361 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:45:47.246983 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:45:47.247016 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:45:47.247047 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:45:47.247090 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:45:47.247148 52361 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:47.248079 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1102 13:45:47.297167 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1102 13:45:47.330951 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1102 13:45:47.368851 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1102 13:45:44.406555 52370 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.72.63:22: connect: no route to host
W1102 13:45:46.242256 52157 pod_ready.go:104] pod "coredns-66bc5c9577-bnv4n" is not "Ready", error: <nil>
W1102 13:45:48.544971 52157 pod_ready.go:104] pod "coredns-66bc5c9577-bnv4n" is not "Ready", error: <nil>
I1102 13:45:50.044033 52157 pod_ready.go:94] pod "coredns-66bc5c9577-bnv4n" is "Ready"
I1102 13:45:50.044080 52157 pod_ready.go:86] duration metric: took 10.509604165s for pod "coredns-66bc5c9577-bnv4n" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.048899 52157 pod_ready.go:83] waiting for pod "etcd-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.055237 52157 pod_ready.go:94] pod "etcd-default-k8s-diff-port-311562" is "Ready"
I1102 13:45:50.055266 52157 pod_ready.go:86] duration metric: took 6.333452ms for pod "etcd-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.059738 52157 pod_ready.go:83] waiting for pod "kube-apiserver-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.067246 52157 pod_ready.go:94] pod "kube-apiserver-default-k8s-diff-port-311562" is "Ready"
I1102 13:45:50.067275 52157 pod_ready.go:86] duration metric: took 7.504959ms for pod "kube-apiserver-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.070558 52157 pod_ready.go:83] waiting for pod "kube-controller-manager-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.239054 52157 pod_ready.go:94] pod "kube-controller-manager-default-k8s-diff-port-311562" is "Ready"
I1102 13:45:50.239089 52157 pod_ready.go:86] duration metric: took 168.502477ms for pod "kube-controller-manager-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.439772 52157 pod_ready.go:83] waiting for pod "kube-proxy-5qv84" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:50.839594 52157 pod_ready.go:94] pod "kube-proxy-5qv84" is "Ready"
I1102 13:45:50.839629 52157 pod_ready.go:86] duration metric: took 399.815208ms for pod "kube-proxy-5qv84" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:51.040877 52157 pod_ready.go:83] waiting for pod "kube-scheduler-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:51.439742 52157 pod_ready.go:94] pod "kube-scheduler-default-k8s-diff-port-311562" is "Ready"
I1102 13:45:51.439773 52157 pod_ready.go:86] duration metric: took 398.85952ms for pod "kube-scheduler-default-k8s-diff-port-311562" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:45:51.439788 52157 pod_ready.go:40] duration metric: took 11.911040292s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1102 13:45:51.507427 52157 start.go:628] kubectl: 1.34.1, cluster: 1.34.1 (minor skew: 0)
I1102 13:45:51.510696 52157 out.go:179] * Done! kubectl is now configured to use "default-k8s-diff-port-311562" cluster and "default" namespace by default
I1102 13:45:47.406089 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1102 13:45:47.442991 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1102 13:45:47.476535 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1102 13:45:47.518129 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/newest-cni-147975/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1102 13:45:47.554833 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:45:47.590010 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:45:47.624568 52361 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:45:47.661561 52361 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1102 13:45:47.685315 52361 ssh_runner.go:195] Run: openssl version
I1102 13:45:47.692602 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:45:47.706611 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:47.712410 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:47.712482 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:47.720260 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:45:47.732929 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:45:47.751436 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:45:47.756780 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:45:47.756847 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:45:47.764402 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:45:47.778176 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:45:47.792782 52361 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:45:47.798225 52361 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:45:47.798282 52361 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:45:47.805999 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:45:47.818047 52361 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1102 13:45:47.823613 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1102 13:45:47.831407 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1102 13:45:47.839095 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1102 13:45:47.847396 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1102 13:45:47.855676 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1102 13:45:47.863567 52361 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
I1102 13:45:47.871243 52361 kubeadm.go:401] StartCluster: {Name:newest-cni-147975 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-147975 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.61.81 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1102 13:45:47.871397 52361 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:45:47.892424 52361 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1102 13:45:47.906131 52361 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1102 13:45:47.906166 52361 kubeadm.go:598] restartPrimaryControlPlane start ...
I1102 13:45:47.906234 52361 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1102 13:45:47.919968 52361 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1102 13:45:47.920513 52361 kubeconfig.go:47] verify endpoint returned: get endpoint: "newest-cni-147975" does not appear in /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:45:47.920771 52361 kubeconfig.go:62] /home/jenkins/minikube-integration/21808-9383/kubeconfig needs updating (will repair): [kubeconfig missing "newest-cni-147975" cluster setting kubeconfig missing "newest-cni-147975" context setting]
I1102 13:45:47.921164 52361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:47.922488 52361 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1102 13:45:47.935887 52361 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.61.81
I1102 13:45:47.935923 52361 kubeadm.go:1161] stopping kube-system containers ...
I1102 13:45:47.935981 52361 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:45:47.957367 52361 docker.go:484] Stopping containers: [796077e2e305 dda81dfc8f93 7da452f8624c 0e3279276aad 40296b761124 a375f5d36ad1 25ea9b121d14 5d63adead0dc 561a7dbe6c95 4c13272346bd d81530d0990b a051ad8744d8 004a21d2302e 4604fd97c7bc 4c3b7b51c863 0cfe01cc5020 092464ab860e]
I1102 13:45:47.957457 52361 ssh_runner.go:195] Run: docker stop 796077e2e305 dda81dfc8f93 7da452f8624c 0e3279276aad 40296b761124 a375f5d36ad1 25ea9b121d14 5d63adead0dc 561a7dbe6c95 4c13272346bd d81530d0990b a051ad8744d8 004a21d2302e 4604fd97c7bc 4c3b7b51c863 0cfe01cc5020 092464ab860e
I1102 13:45:47.979846 52361 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1102 13:45:47.999177 52361 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1102 13:45:48.012380 52361 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1102 13:45:48.012405 52361 kubeadm.go:158] found existing configuration files:
I1102 13:45:48.012460 52361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1102 13:45:48.022972 52361 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1102 13:45:48.023027 52361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1102 13:45:48.033987 52361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1102 13:45:48.047093 52361 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1102 13:45:48.047186 52361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1102 13:45:48.058952 52361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1102 13:45:48.069542 52361 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1102 13:45:48.069612 52361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1102 13:45:48.081299 52361 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1102 13:45:48.092106 52361 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1102 13:45:48.092176 52361 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1102 13:45:48.104552 52361 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1102 13:45:48.116359 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:48.247116 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:49.517955 52361 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.270797804s)
I1102 13:45:49.518018 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:49.815066 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:49.879178 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:49.948523 52361 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:45:49.948619 52361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:50.448833 52361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:50.949043 52361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:51.448826 52361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:51.521892 52361 api_server.go:72] duration metric: took 1.573385649s to wait for apiserver process to appear ...
I1102 13:45:51.521919 52361 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:45:51.521942 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:51.522404 52361 api_server.go:269] stopped: https://192.168.61.81:8443/healthz: Get "https://192.168.61.81:8443/healthz": dial tcp 192.168.61.81:8443: connect: connection refused
I1102 13:45:52.022045 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:50.486647 52370 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.72.63:22: connect: no route to host
I1102 13:45:54.473174 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                W1102 13:45:54.473203 52361 api_server.go:103] status: https://192.168.61.81:8443/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                I1102 13:45:54.473217 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:54.493004 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                W1102 13:45:54.493034 52361 api_server.go:103] status: https://192.168.61.81:8443/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                I1102 13:45:54.522389 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:54.623269 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/priority-and-fairness-config-producer failed: reason withheld
[-]poststarthook/bootstrap-controller failed: reason withheld
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[-]poststarthook/apiservice-registration-controller failed: reason withheld
[-]poststarthook/apiservice-discovery-controller failed: reason withheld
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:54.623299 52361 api_server.go:103] status: https://192.168.61.81:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/priority-and-fairness-config-producer failed: reason withheld
[-]poststarthook/bootstrap-controller failed: reason withheld
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[-]poststarthook/apiservice-registration-controller failed: reason withheld
[-]poststarthook/apiservice-discovery-controller failed: reason withheld
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:55.022896 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:55.031634 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:55.031663 52361 api_server.go:103] status: https://192.168.61.81:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:55.522128 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:55.534685 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:45:55.534710 52361 api_server.go:103] status: https://192.168.61.81:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:45:56.022315 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:56.027465 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 200:
ok
I1102 13:45:56.034524 52361 api_server.go:141] control plane version: v1.34.1
I1102 13:45:56.034546 52361 api_server.go:131] duration metric: took 4.512620372s to wait for apiserver health ...
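[editor's note] The wait above probes /healthz roughly every 500ms until the 403/500 responses give way to a 200. A minimal sketch of that style of polling loop, assuming an anonymous probe over TLS with the URL and timeout taken from the log (illustrative only, not minikube's api_server.go):

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

// waitForHealthz polls the apiserver /healthz endpoint until it returns 200
// or the deadline passes. TLS verification is skipped because the log shows
// unauthenticated probes; a real client would present certificates.
func waitForHealthz(url string, timeout time.Duration) error {
	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
		Timeout:   5 * time.Second,
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil // body is simply "ok"
			}
			fmt.Printf("healthz returned %d:\n%s\n", resp.StatusCode, body)
		}
		time.Sleep(500 * time.Millisecond) // roughly the cadence seen in the log
	}
	return fmt.Errorf("apiserver did not become healthy within %s", timeout)
}

func main() {
	if err := waitForHealthz("https://192.168.61.81:8443/healthz", 4*time.Minute); err != nil {
		panic(err)
	}
}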
I1102 13:45:56.034555 52361 cni.go:84] Creating CNI manager for ""
I1102 13:45:56.034565 52361 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:45:56.036598 52361 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1102 13:45:56.037929 52361 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1102 13:45:56.054007 52361 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
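[editor's note] The bridge CNI step copies a conflist into /etc/cni/net.d over SSH. The 496-byte file itself is not shown in the log; the sketch below writes a typical bridge-plus-portmap conflist of the same shape (the JSON contents and pod subnet are assumptions, not minikube's exact template):

package main

import "os"

// An illustrative bridge CNI configuration, not the exact file minikube scp'd.
const bridgeConflist = `{
  "cniVersion": "0.3.1",
  "name": "bridge",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "bridge",
      "addIf": "true",
      "isDefaultGateway": true,
      "ipMasq": true,
      "hairpinMode": true,
      "ipam": { "type": "host-local", "subnet": "10.244.0.0/16" }
    },
    { "type": "portmap", "capabilities": { "portMappings": true } }
  ]
}
`

func main() {
	// Assumes /etc/cni/net.d already exists, as created by the `sudo mkdir -p` step above.
	if err := os.WriteFile("/etc/cni/net.d/1-k8s.conflist", []byte(bridgeConflist), 0o644); err != nil {
		panic(err)
	}
}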
I1102 13:45:56.093299 52361 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:45:56.097859 52361 system_pods.go:59] 9 kube-system pods found
I1102 13:45:56.097909 52361 system_pods.go:61] "coredns-66bc5c9577-9kzzv" [c30e9e61-e9a7-41ae-9d6f-d74a8636db46] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:56.097922 52361 system_pods.go:61] "coredns-66bc5c9577-qgf47" [cf78af7a-9d0c-41de-86b8-cdcb6edb473f] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:56.097932 52361 system_pods.go:61] "etcd-newest-cni-147975" [4252185b-07b4-4a2c-a158-e5f1c642972b] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:45:56.097941 52361 system_pods.go:61] "kube-apiserver-newest-cni-147975" [8a53f33b-bf25-40e9-956d-ea5b022bf74d] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:45:56.097950 52361 system_pods.go:61] "kube-controller-manager-newest-cni-147975" [4acfba8a-ac1a-48ef-9e78-b0c14875bb27] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:45:56.097964 52361 system_pods.go:61] "kube-proxy-9pcbp" [847ce8ad-752e-4f1d-addb-429b2166cf93] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1102 13:45:56.097972 52361 system_pods.go:61] "kube-scheduler-newest-cni-147975" [a85a41fb-e8de-429a-bc86-79301882f478] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:45:56.097988 52361 system_pods.go:61] "metrics-server-746fcd58dc-mx8wv" [13cfe3e9-f58b-4179-8995-b1a4924fd34e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:45:56.097993 52361 system_pods.go:61] "storage-provisioner" [dfc448b3-9eb7-4baf-baa7-e1c638e45984] Running
I1102 13:45:56.098003 52361 system_pods.go:74] duration metric: took 4.680952ms to wait for pod list to return data ...
I1102 13:45:56.098013 52361 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:45:56.101813 52361 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:45:56.101840 52361 node_conditions.go:123] node cpu capacity is 2
I1102 13:45:56.101850 52361 node_conditions.go:105] duration metric: took 3.827836ms to run NodePressure ...
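[editor's note] system_pods.go above lists the kube-system pods and reports per-container readiness before continuing. A rough client-go equivalent, using the kubeconfig path the log updates (the readiness summary format is illustrative, not minikube's implementation):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path taken from the settings.go lines in this log.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/minikube-integration/21808-9383/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	pods, err := cs.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d kube-system pods found\n", len(pods.Items))
	for _, p := range pods.Items {
		ready := 0
		for _, c := range p.Status.ContainerStatuses {
			if c.Ready {
				ready++
			}
		}
		fmt.Printf("%s: phase=%s ready=%d/%d\n", p.Name, p.Status.Phase, ready, len(p.Spec.Containers))
	}
}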
I1102 13:45:56.101901 52361 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:45:56.384024 52361 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1102 13:45:56.399387 52361 ops.go:34] apiserver oom_adj: -16
I1102 13:45:56.399409 52361 kubeadm.go:602] duration metric: took 8.493235964s to restartPrimaryControlPlane
I1102 13:45:56.399417 52361 kubeadm.go:403] duration metric: took 8.528183618s to StartCluster
I1102 13:45:56.399431 52361 settings.go:142] acquiring lock: {Name:mk2d74ff80d6e54b2738086ad41016418abd2f10 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:56.399513 52361 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:45:56.400275 52361 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:45:56.400526 52361 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.61.81 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1102 13:45:56.400618 52361 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1102 13:45:56.400721 52361 addons.go:70] Setting storage-provisioner=true in profile "newest-cni-147975"
I1102 13:45:56.400740 52361 addons.go:239] Setting addon storage-provisioner=true in "newest-cni-147975"
W1102 13:45:56.400751 52361 addons.go:248] addon storage-provisioner should already be in state true
I1102 13:45:56.400757 52361 addons.go:70] Setting default-storageclass=true in profile "newest-cni-147975"
I1102 13:45:56.400781 52361 host.go:66] Checking if "newest-cni-147975" exists ...
I1102 13:45:56.400791 52361 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "newest-cni-147975"
I1102 13:45:56.400798 52361 addons.go:70] Setting dashboard=true in profile "newest-cni-147975"
I1102 13:45:56.400820 52361 config.go:182] Loaded profile config "newest-cni-147975": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:56.400841 52361 addons.go:239] Setting addon dashboard=true in "newest-cni-147975"
W1102 13:45:56.400851 52361 addons.go:248] addon dashboard should already be in state true
I1102 13:45:56.400884 52361 host.go:66] Checking if "newest-cni-147975" exists ...
I1102 13:45:56.400898 52361 cache.go:107] acquiring lock: {Name:mkfde24ce23f92e3eaf637254ed5ac4355c07159 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1102 13:45:56.400947 52361 addons.go:70] Setting metrics-server=true in profile "newest-cni-147975"
I1102 13:45:56.400974 52361 addons.go:239] Setting addon metrics-server=true in "newest-cni-147975"
W1102 13:45:56.400982 52361 addons.go:248] addon metrics-server should already be in state true
I1102 13:45:56.400987 52361 cache.go:115] /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 exists
I1102 13:45:56.400995 52361 cache.go:96] cache image "gcr.io/k8s-minikube/gvisor-addon:2" -> "/home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2" took 106.166µs
I1102 13:45:56.401003 52361 host.go:66] Checking if "newest-cni-147975" exists ...
I1102 13:45:56.401004 52361 cache.go:80] save to tar file gcr.io/k8s-minikube/gvisor-addon:2 -> /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 succeeded
I1102 13:45:56.401119 52361 cache.go:87] Successfully saved all images to host disk.
I1102 13:45:56.401304 52361 config.go:182] Loaded profile config "newest-cni-147975": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:56.402945 52361 out.go:179] * Verifying Kubernetes components...
I1102 13:45:56.404264 52361 out.go:179] - Using image registry.k8s.io/echoserver:1.4
I1102 13:45:56.404270 52361 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:56.404303 52361 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1102 13:45:56.404535 52361 addons.go:239] Setting addon default-storageclass=true in "newest-cni-147975"
W1102 13:45:56.404554 52361 addons.go:248] addon default-storageclass should already be in state true
I1102 13:45:56.404574 52361 host.go:66] Checking if "newest-cni-147975" exists ...
I1102 13:45:56.404800 52361 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:45:56.405442 52361 out.go:179] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I1102 13:45:56.405483 52361 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:45:56.405498 52361 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1102 13:45:56.406423 52361 out.go:179] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I1102 13:45:56.406427 52361 addons.go:436] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1102 13:45:56.406524 52361 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1102 13:45:56.407066 52361 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1102 13:45:56.407085 52361 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1102 13:45:56.407450 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-ns.yaml
I1102 13:45:56.407470 52361 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I1102 13:45:56.409277 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.410208 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:56.410241 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.410279 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.410488 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:56.410937 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:56.410999 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.411233 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:56.411461 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.411604 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.411734 52361 main.go:143] libmachine: domain newest-cni-147975 has defined MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.411955 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:56.411992 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.412064 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:56.412099 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.412162 52361 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:22:13:52", ip: ""} in network mk-newest-cni-147975: {Iface:virbr3 ExpiryTime:2025-11-02 14:45:33 +0000 UTC Type:0 Mac:52:54:00:22:13:52 Iaid: IPaddr:192.168.61.81 Prefix:24 Hostname:newest-cni-147975 Clientid:01:52:54:00:22:13:52}
I1102 13:45:56.412158 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:56.412193 52361 main.go:143] libmachine: domain newest-cni-147975 has defined IP address 192.168.61.81 and MAC address 52:54:00:22:13:52 in network mk-newest-cni-147975
I1102 13:45:56.412404 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
I1102 13:45:56.412597 52361 sshutil.go:53] new ssh client: &{IP:192.168.61.81 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/newest-cni-147975/id_rsa Username:docker}
                                                I1102 13:45:56.688581 52361 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:45:56.714679 52361 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:45:56.714750 52361 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:45:56.744715 52361 api_server.go:72] duration metric: took 344.158891ms to wait for apiserver process to appear ...
I1102 13:45:56.744741 52361 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:45:56.744758 52361 api_server.go:253] Checking apiserver healthz at https://192.168.61.81:8443/healthz ...
I1102 13:45:56.750446 52361 api_server.go:279] https://192.168.61.81:8443/healthz returned 200:
ok
I1102 13:45:56.751676 52361 api_server.go:141] control plane version: v1.34.1
I1102 13:45:56.751699 52361 api_server.go:131] duration metric: took 6.950433ms to wait for apiserver health ...
I1102 13:45:56.751710 52361 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:45:56.759183 52361 system_pods.go:59] 9 kube-system pods found
I1102 13:45:56.759216 52361 system_pods.go:61] "coredns-66bc5c9577-9kzzv" [c30e9e61-e9a7-41ae-9d6f-d74a8636db46] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:56.759229 52361 system_pods.go:61] "coredns-66bc5c9577-qgf47" [cf78af7a-9d0c-41de-86b8-cdcb6edb473f] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:45:56.759246 52361 system_pods.go:61] "etcd-newest-cni-147975" [4252185b-07b4-4a2c-a158-e5f1c642972b] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:45:56.759253 52361 system_pods.go:61] "kube-apiserver-newest-cni-147975" [8a53f33b-bf25-40e9-956d-ea5b022bf74d] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:45:56.759259 52361 system_pods.go:61] "kube-controller-manager-newest-cni-147975" [4acfba8a-ac1a-48ef-9e78-b0c14875bb27] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:45:56.759265 52361 system_pods.go:61] "kube-proxy-9pcbp" [847ce8ad-752e-4f1d-addb-429b2166cf93] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1102 13:45:56.759270 52361 system_pods.go:61] "kube-scheduler-newest-cni-147975" [a85a41fb-e8de-429a-bc86-79301882f478] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:45:56.759275 52361 system_pods.go:61] "metrics-server-746fcd58dc-mx8wv" [13cfe3e9-f58b-4179-8995-b1a4924fd34e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:45:56.759279 52361 system_pods.go:61] "storage-provisioner" [dfc448b3-9eb7-4baf-baa7-e1c638e45984] Running
I1102 13:45:56.759287 52361 system_pods.go:74] duration metric: took 7.5715ms to wait for pod list to return data ...
I1102 13:45:56.759298 52361 default_sa.go:34] waiting for default service account to be created ...
I1102 13:45:56.763228 52361 default_sa.go:45] found service account: "default"
I1102 13:45:56.763256 52361 default_sa.go:55] duration metric: took 3.951354ms for default service account to be created ...
I1102 13:45:56.763271 52361 kubeadm.go:587] duration metric: took 362.717936ms to wait for: map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true]
I1102 13:45:56.763289 52361 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:45:56.768746 52361 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:45:56.768769 52361 node_conditions.go:123] node cpu capacity is 2
I1102 13:45:56.768782 52361 node_conditions.go:105] duration metric: took 5.486448ms to run NodePressure ...
I1102 13:45:56.768798 52361 start.go:242] waiting for startup goroutines ...
I1102 13:45:56.944891 52361 addons.go:436] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1102 13:45:56.944915 52361 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I1102 13:45:56.969778 52361 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1102 13:45:56.969805 52361 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:45:56.969816 52361 cache_images.go:264] succeeded pushing to: newest-cni-147975
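[editor's note] The preload check above lists the images already present on the guest and compares them against the set required for v1.34.1 before deciding to skip loading. A small sketch of the same comparison using `docker images --format` through os/exec (the required-image list is copied from the log; minikube runs this over SSH on the VM, whereas this sketch runs against the local daemon):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	required := []string{
		"registry.k8s.io/kube-apiserver:v1.34.1",
		"registry.k8s.io/kube-controller-manager:v1.34.1",
		"registry.k8s.io/kube-scheduler:v1.34.1",
		"registry.k8s.io/kube-proxy:v1.34.1",
		"registry.k8s.io/etcd:3.6.4-0",
		"registry.k8s.io/pause:3.10.1",
		"registry.k8s.io/coredns/coredns:v1.12.1",
		"gcr.io/k8s-minikube/storage-provisioner:v5",
	}

	// Same command the ssh_runner invokes above.
	out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
	if err != nil {
		panic(err)
	}
	have := map[string]bool{}
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		have[line] = true
	}

	missing := 0
	for _, img := range required {
		if !have[img] {
			fmt.Println("missing:", img)
			missing++
		}
	}
	if missing == 0 {
		fmt.Println("Images are preloaded, skipping loading")
	}
}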
I1102 13:45:56.991460 52361 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1102 13:45:56.993360 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I1102 13:45:56.993381 52361 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I1102 13:45:57.016284 52361 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:45:57.043942 52361 addons.go:436] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1102 13:45:57.043964 52361 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1102 13:45:57.063179 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I1102 13:45:57.063207 52361 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I1102 13:45:57.103218 52361 addons.go:436] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:45:57.103246 52361 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1102 13:45:57.120682 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I1102 13:45:57.120722 52361 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I1102 13:45:57.158831 52361 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:45:57.183928 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-dp.yaml
I1102 13:45:57.183956 52361 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
I1102 13:45:57.245005 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-role.yaml
I1102 13:45:57.245040 52361 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I1102 13:45:57.317172 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I1102 13:45:57.317202 52361 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I1102 13:45:53.488158 52370 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.72.63:22: connect: connection refused
I1102 13:45:56.610217 52370 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:45:56.614168 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.614598 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:56.614631 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.614943 52370 profile.go:143] Saving config to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/config.json ...
I1102 13:45:56.615239 52370 machine.go:94] provisionDockerMachine start ...
I1102 13:45:56.617745 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.618205 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:56.618239 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.618488 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:56.618794 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:56.618812 52370 main.go:143] libmachine: About to run SSH command:
hostname
I1102 13:45:56.737480 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1102 13:45:56.737514 52370 buildroot.go:166] provisioning hostname "no-preload-047294"
I1102 13:45:56.741083 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.741556 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:56.741581 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.741755 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:56.742047 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:56.742065 52370 main.go:143] libmachine: About to run SSH command:
sudo hostname no-preload-047294 && echo "no-preload-047294" | sudo tee /etc/hostname
I1102 13:45:56.885194 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: no-preload-047294
I1102 13:45:56.888085 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.888599 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:56.888634 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:56.888846 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:56.889114 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:56.889139 52370 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sno-preload-047294' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 no-preload-047294/g' /etc/hosts;
else
echo '127.0.1.1 no-preload-047294' | sudo tee -a /etc/hosts;
fi
fi
I1102 13:45:57.022832 52370 main.go:143] libmachine: SSH cmd err, output: <nil>:
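[editor's note] provisionDockerMachine drives the guest entirely over SSH: the hostname command, the /etc/hosts rewrite above, and the certificate and docker-unit steps below all go through the same runner. A bare-bones sketch of executing one such command with golang.org/x/crypto/ssh (address, user, and key path are taken from the log; host-key checking is skipped only because this targets a throwaway test VM):

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

// runRemote executes a single command on the guest, the way the SSH runner
// does for the provisioning steps in this log.
func runRemote(addr, user, keyPath, cmd string) (string, error) {
	key, err := os.ReadFile(keyPath)
	if err != nil {
		return "", err
	}
	signer, err := ssh.ParsePrivateKey(key)
	if err != nil {
		return "", err
	}
	cfg := &ssh.ClientConfig{
		User:            user,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // test VM only; do not use in production
	}
	client, err := ssh.Dial("tcp", addr, cfg)
	if err != nil {
		return "", err
	}
	defer client.Close()
	session, err := client.NewSession()
	if err != nil {
		return "", err
	}
	defer session.Close()
	out, err := session.CombinedOutput(cmd)
	return string(out), err
}

func main() {
	out, err := runRemote("192.168.72.63:22", "docker",
		"/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa",
		`sudo hostname no-preload-047294 && echo "no-preload-047294" | sudo tee /etc/hostname`)
	fmt.Println(out, err)
}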
I1102 13:45:57.022861 52370 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21808-9383/.minikube CaCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21808-9383/.minikube}
I1102 13:45:57.022883 52370 buildroot.go:174] setting up certificates
I1102 13:45:57.022894 52370 provision.go:84] configureAuth start
I1102 13:45:57.026486 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.027035 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:57.027080 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.029781 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.030259 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:57.030282 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.030445 52370 provision.go:143] copyHostCerts
I1102 13:45:57.030489 52370 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem, removing ...
I1102 13:45:57.030506 52370 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem
I1102 13:45:57.030569 52370 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem (1082 bytes)
I1102 13:45:57.030696 52370 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem, removing ...
I1102 13:45:57.030705 52370 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem
I1102 13:45:57.030734 52370 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem (1123 bytes)
I1102 13:45:57.030800 52370 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem, removing ...
I1102 13:45:57.030814 52370 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem
I1102 13:45:57.030842 52370 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem (1675 bytes)
I1102 13:45:57.030903 52370 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem org=jenkins.no-preload-047294 san=[127.0.0.1 192.168.72.63 localhost minikube no-preload-047294]
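[editor's note] configureAuth above generates a server certificate whose SANs cover 127.0.0.1, the VM IP, localhost, minikube, and the profile name. A compressed crypto/x509 sketch of issuing such a certificate from an existing CA; the key size, serial handling, validity, and the PKCS#1 CA key format are assumptions, and minikube's own provision code differs:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

// signServerCert issues a server certificate covering the SANs listed in the
// log line above, signed by an existing CA. This is a sketch, not minikube's code.
func signServerCert(caCert *x509.Certificate, caKey *rsa.PrivateKey) ([]byte, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048) // key size is an assumption
	if err != nil {
		return nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(time.Now().UnixNano()),
		Subject:      pkix.Name{Organization: []string{"jenkins.no-preload-047294"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour), // matches the CertExpiration value seen elsewhere in this log
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.72.63")},
		DNSNames:     []string{"localhost", "minikube", "no-preload-047294"},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, caCert, &key.PublicKey, caKey)
	if err != nil {
		return nil, err
	}
	return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der}), nil
}

func main() {
	// Error handling trimmed for brevity; file names are placeholders for the ca.pem/ca-key.pem paths above.
	caPEM, _ := os.ReadFile("ca.pem")
	caKeyPEM, _ := os.ReadFile("ca-key.pem")
	caBlock, _ := pem.Decode(caPEM)
	caKeyBlock, _ := pem.Decode(caKeyPEM)
	caCert, _ := x509.ParseCertificate(caBlock.Bytes)
	caKey, _ := x509.ParsePKCS1PrivateKey(caKeyBlock.Bytes)
	certPEM, err := signServerCert(caCert, caKey)
	if err != nil {
		panic(err)
	}
	_ = os.WriteFile("server.pem", certPEM, 0o600)
}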
I1102 13:45:57.401279 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-sa.yaml
I1102 13:45:57.401305 52361 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I1102 13:45:57.478491 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-secret.yaml
I1102 13:45:57.478524 52361 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I1102 13:45:57.545072 52361 addons.go:436] installing /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:45:57.545096 52361 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I1102 13:45:57.615754 52361 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:45:58.907853 52361 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.891527315s)
I1102 13:45:58.915190 52361 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (1.756319413s)
I1102 13:45:58.915238 52361 addons.go:480] Verifying addon metrics-server=true in "newest-cni-147975"
I1102 13:45:59.192585 52361 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (1.576777475s)
I1102 13:45:59.194323 52361 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p newest-cni-147975 addons enable metrics-server
I1102 13:45:59.196091 52361 out.go:179] * Enabled addons: default-storageclass, storage-provisioner, metrics-server, dashboard
I1102 13:45:59.197461 52361 addons.go:515] duration metric: took 2.796848686s for enable addons: enabled=[default-storageclass storage-provisioner metrics-server dashboard]
I1102 13:45:59.197511 52361 start.go:247] waiting for cluster config update ...
I1102 13:45:59.197531 52361 start.go:256] writing updated cluster config ...
I1102 13:45:59.197880 52361 ssh_runner.go:195] Run: rm -f paused
I1102 13:45:59.254393 52361 start.go:628] kubectl: 1.34.1, cluster: 1.34.1 (minor skew: 0)
I1102 13:45:59.255941 52361 out.go:179] * Done! kubectl is now configured to use "newest-cni-147975" cluster and "default" namespace by default
I1102 13:45:59.477039 52806 start.go:364] duration metric: took 31.604839295s to acquireMachinesLock for "embed-certs-705938"
I1102 13:45:59.477108 52806 start.go:93] Provisioning new machine with config: &{Name:embed-certs-705938 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-705938 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1102 13:45:59.477222 52806 start.go:125] createHost starting for "" (driver="kvm2")
I1102 13:45:57.583058 52370 provision.go:177] copyRemoteCerts
I1102 13:45:57.583132 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1102 13:45:57.586046 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.586503 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:57.586535 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.586686 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:45:57.678038 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1102 13:45:57.713691 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I1102 13:45:57.747399 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1102 13:45:57.776834 52370 provision.go:87] duration metric: took 753.922019ms to configureAuth
I1102 13:45:57.776873 52370 buildroot.go:189] setting minikube options for container-runtime
I1102 13:45:57.777104 52370 config.go:182] Loaded profile config "no-preload-047294": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:45:57.779796 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.780355 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:57.780393 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.780606 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:57.780874 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:57.780889 52370 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1102 13:45:57.901865 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1102 13:45:57.901895 52370 buildroot.go:70] root file system type: tmpfs
I1102 13:45:57.902036 52370 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1102 13:45:57.905419 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.905926 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:57.905963 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:57.906383 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:57.906673 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:57.906754 52370 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1102 13:45:58.044239 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1102 13:45:58.047301 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:58.047691 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:58.047728 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:58.047920 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:58.048159 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:58.048178 52370 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1102 13:45:59.198840 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1102 13:45:59.198871 52370 machine.go:97] duration metric: took 2.583615843s to provisionDockerMachine
I1102 13:45:59.198887 52370 start.go:293] postStartSetup for "no-preload-047294" (driver="kvm2")
I1102 13:45:59.198899 52370 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1102 13:45:59.198955 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1102 13:45:59.202023 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.202491 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:59.202518 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.202679 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:45:59.295390 52370 ssh_runner.go:195] Run: cat /etc/os-release
I1102 13:45:59.301095 52370 info.go:137] Remote host: Buildroot 2025.02
I1102 13:45:59.301126 52370 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/addons for local assets ...
I1102 13:45:59.301195 52370 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/files for local assets ...
I1102 13:45:59.301317 52370 filesync.go:149] local asset: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem -> 132702.pem in /etc/ssl/certs
I1102 13:45:59.301472 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1102 13:45:59.315721 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:59.353954 52370 start.go:296] duration metric: took 155.052404ms for postStartSetup
I1102 13:45:59.353996 52370 fix.go:56] duration metric: took 19.488564286s for fixHost
I1102 13:45:59.357200 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.357763 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:59.357802 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.358072 52370 main.go:143] libmachine: Using SSH client type: native
I1102 13:45:59.358370 52370 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.72.63 22 <nil> <nil>}
I1102 13:45:59.358393 52370 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1102 13:45:59.476875 52370 main.go:143] libmachine: SSH cmd err, output: <nil>: 1762091159.443263241
I1102 13:45:59.476904 52370 fix.go:216] guest clock: 1762091159.443263241
I1102 13:45:59.476913 52370 fix.go:229] Guest: 2025-11-02 13:45:59.443263241 +0000 UTC Remote: 2025-11-02 13:45:59.35400079 +0000 UTC m=+51.971877286 (delta=89.262451ms)
I1102 13:45:59.476929 52370 fix.go:200] guest clock delta is within tolerance: 89.262451ms
I1102 13:45:59.476935 52370 start.go:83] releasing machines lock for "no-preload-047294", held for 19.611548915s
I1102 13:45:59.480372 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.480900 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:59.480933 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.481279 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:45:59.481332 52370 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:45:59.481362 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:45:59.481404 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:45:59.481437 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:45:59.481472 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:45:59.481531 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:45:59.481620 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:45:59.484433 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.484830 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:45:59.484851 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:45:59.485012 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:45:59.604225 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:45:59.644148 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:45:59.684494 52370 ssh_runner.go:195] Run: openssl version
I1102 13:45:59.692201 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:45:59.712037 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:59.718376 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:59.718450 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:45:59.727594 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:45:59.748974 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:45:59.769545 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:45:59.776210 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:45:59.776285 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:45:59.784503 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:45:59.802496 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:45:59.819078 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:45:59.825426 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:45:59.825502 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:45:59.833285 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:45:59.847905 52370 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-certificates >/dev/null 2>&1 && sudo update-ca-certificates || true"
I1102 13:45:59.853749 52370 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-trust >/dev/null 2>&1 && sudo update-ca-trust extract || true"
I1102 13:45:59.858856 52370 ssh_runner.go:195] Run: cat /version.json
I1102 13:45:59.858951 52370 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1102 13:45:59.890927 52370 ssh_runner.go:195] Run: systemctl --version
I1102 13:45:59.897644 52370 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1102 13:45:59.904679 52370 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1102 13:45:59.904750 52370 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1102 13:45:59.935576 52370 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1102 13:45:59.935606 52370 start.go:496] detecting cgroup driver to use...
I1102 13:45:59.935730 52370 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:45:59.964279 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1102 13:45:59.979683 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1102 13:45:59.993474 52370 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1102 13:45:59.993536 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1102 13:46:00.008230 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:46:00.022562 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1102 13:46:00.036229 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:46:00.050572 52370 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1102 13:46:00.067799 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1102 13:46:00.081907 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1102 13:46:00.096308 52370 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1102 13:46:00.114387 52370 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1102 13:46:00.130483 52370 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1102 13:46:00.130563 52370 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1102 13:46:00.150302 52370 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1102 13:46:00.167595 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:00.335252 52370 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1102 13:46:00.384484 52370 start.go:496] detecting cgroup driver to use...
I1102 13:46:00.384583 52370 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1102 13:46:00.402739 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:46:00.421297 52370 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1102 13:46:00.448214 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:46:00.467301 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:46:00.485501 52370 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1102 13:46:00.529031 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:46:00.548789 52370 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:46:00.576038 52370 ssh_runner.go:195] Run: which cri-dockerd
I1102 13:46:00.580513 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1102 13:46:00.593719 52370 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1102 13:46:00.619204 52370 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1102 13:46:00.826949 52370 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1102 13:46:01.010283 52370 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1102 13:46:01.010413 52370 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1102 13:46:01.035123 52370 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1102 13:46:01.052102 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:01.236793 52370 ssh_runner.go:195] Run: sudo systemctl restart docker
I1102 13:46:01.926974 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1102 13:46:01.947750 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1102 13:46:01.966571 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:01.988323 52370 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1102 13:46:02.198426 52370 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1102 13:46:02.389146 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:45:59.479214 52806 out.go:252] * Creating kvm2 VM (CPUs=2, Memory=3072MB, Disk=20000MB) ...
I1102 13:45:59.479488 52806 start.go:159] libmachine.API.Create for "embed-certs-705938" (driver="kvm2")
I1102 13:45:59.479530 52806 client.go:173] LocalClient.Create starting
I1102 13:45:59.479625 52806 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem
I1102 13:45:59.479679 52806 main.go:143] libmachine: Decoding PEM data...
I1102 13:45:59.479711 52806 main.go:143] libmachine: Parsing certificate...
I1102 13:45:59.479809 52806 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem
I1102 13:45:59.479847 52806 main.go:143] libmachine: Decoding PEM data...
I1102 13:45:59.479870 52806 main.go:143] libmachine: Parsing certificate...
I1102 13:45:59.480302 52806 main.go:143] libmachine: creating domain...
I1102 13:45:59.480322 52806 main.go:143] libmachine: creating network...
I1102 13:45:59.482237 52806 main.go:143] libmachine: found existing default network
I1102 13:45:59.482497 52806 main.go:143] libmachine: <network connections='4'>
<name>default</name>
<uuid>c61344c2-dba2-46dd-a21a-34776d235985</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr0' stp='on' delay='0'/>
<mac address='52:54:00:10:a2:1d'/>
<ip address='192.168.122.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.122.2' end='192.168.122.254'/>
</dhcp>
</ip>
</network>
I1102 13:45:59.483419 52806 network.go:211] skipping subnet 192.168.39.0/24 that is taken: &{IP:192.168.39.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.39.0/24 Gateway:192.168.39.1 ClientMin:192.168.39.2 ClientMax:192.168.39.254 Broadcast:192.168.39.255 IsPrivate:true Interface:{IfaceName:virbr1 IfaceIPv4:192.168.39.1 IfaceMTU:1500 IfaceMAC:52:54:00:82:31:61} reservation:<nil>}
I1102 13:45:59.484544 52806 network.go:206] using free private subnet 192.168.50.0/24: &{IP:192.168.50.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.50.0/24 Gateway:192.168.50.1 ClientMin:192.168.50.2 ClientMax:192.168.50.254 Broadcast:192.168.50.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001dff020}
I1102 13:45:59.484640 52806 main.go:143] libmachine: defining private network:
<network>
<name>mk-embed-certs-705938</name>
<dns enable='no'/>
<ip address='192.168.50.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.50.2' end='192.168.50.253'/>
</dhcp>
</ip>
</network>
I1102 13:45:59.492966 52806 main.go:143] libmachine: creating private network mk-embed-certs-705938 192.168.50.0/24...
I1102 13:45:59.583327 52806 main.go:143] libmachine: private network mk-embed-certs-705938 192.168.50.0/24 created
I1102 13:45:59.583618 52806 main.go:143] libmachine: <network>
<name>mk-embed-certs-705938</name>
<uuid>04682b65-7e72-41b1-ae2d-736eef505059</uuid>
<bridge name='virbr2' stp='on' delay='0'/>
<mac address='52:54:00:dc:11:a7'/>
<dns enable='no'/>
<ip address='192.168.50.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.50.2' end='192.168.50.253'/>
</dhcp>
</ip>
</network>
I1102 13:45:59.583664 52806 main.go:143] libmachine: setting up store path in /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938 ...
I1102 13:45:59.583715 52806 main.go:143] libmachine: building disk image from file:///home/jenkins/minikube-integration/21808-9383/.minikube/cache/iso/amd64/minikube-v1.37.0-1761658712-21800-amd64.iso
I1102 13:45:59.583730 52806 common.go:152] Making disk image using store path: /home/jenkins/minikube-integration/21808-9383/.minikube
I1102 13:45:59.583815 52806 main.go:143] libmachine: Downloading /home/jenkins/minikube-integration/21808-9383/.minikube/cache/boot2docker.iso from file:///home/jenkins/minikube-integration/21808-9383/.minikube/cache/iso/amd64/minikube-v1.37.0-1761658712-21800-amd64.iso...
I1102 13:45:59.852164 52806 common.go:159] Creating ssh key: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/id_rsa...
I1102 13:45:59.984327 52806 common.go:165] Creating raw disk image: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/embed-certs-705938.rawdisk...
I1102 13:45:59.984383 52806 main.go:143] libmachine: Writing magic tar header
I1102 13:45:59.984407 52806 main.go:143] libmachine: Writing SSH key tar header
I1102 13:45:59.984516 52806 common.go:179] Fixing permissions on /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938 ...
I1102 13:45:59.984609 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938
I1102 13:45:59.984657 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938 (perms=drwx------)
I1102 13:45:59.984690 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21808-9383/.minikube/machines
I1102 13:45:59.984709 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21808-9383/.minikube/machines (perms=drwxr-xr-x)
I1102 13:45:59.984729 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21808-9383/.minikube
I1102 13:45:59.984748 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21808-9383/.minikube (perms=drwxr-xr-x)
I1102 13:45:59.984763 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21808-9383
I1102 13:45:59.984783 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21808-9383 (perms=drwxrwxr-x)
I1102 13:45:59.984800 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration
I1102 13:45:59.984814 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration (perms=drwxrwxr-x)
I1102 13:45:59.984828 52806 main.go:143] libmachine: checking permissions on dir: /home/jenkins
I1102 13:45:59.984839 52806 main.go:143] libmachine: setting executable bit set on /home/jenkins (perms=drwxr-xr-x)
I1102 13:45:59.984851 52806 main.go:143] libmachine: checking permissions on dir: /home
I1102 13:45:59.984883 52806 main.go:143] libmachine: skipping /home - not owner
I1102 13:45:59.984894 52806 main.go:143] libmachine: defining domain...
I1102 13:45:59.986152 52806 main.go:143] libmachine: defining domain using XML:
<domain type='kvm'>
<name>embed-certs-705938</name>
<memory unit='MiB'>3072</memory>
<vcpu>2</vcpu>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough'>
</cpu>
<os>
<type>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<devices>
<disk type='file' device='cdrom'>
<source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' cache='default' io='threads' />
<source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/embed-certs-705938.rawdisk'/>
<target dev='hda' bus='virtio'/>
</disk>
<interface type='network'>
<source network='mk-embed-certs-705938'/>
<model type='virtio'/>
</interface>
<interface type='network'>
<source network='default'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
</rng>
</devices>
</domain>
I1102 13:45:59.995830 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:1e:e2:b0 in network default
I1102 13:45:59.996536 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:45:59.996554 52806 main.go:143] libmachine: starting domain...
I1102 13:45:59.996559 52806 main.go:143] libmachine: ensuring networks are active...
I1102 13:45:59.997462 52806 main.go:143] libmachine: Ensuring network default is active
I1102 13:45:59.997971 52806 main.go:143] libmachine: Ensuring network mk-embed-certs-705938 is active
I1102 13:45:59.998947 52806 main.go:143] libmachine: getting domain XML...
I1102 13:46:00.000542 52806 main.go:143] libmachine: starting domain XML:
<domain type='kvm'>
<name>embed-certs-705938</name>
<uuid>32fc3864-8fea-4d1b-850e-64e3e9ecc065</uuid>
<memory unit='KiB'>3145728</memory>
<currentMemory unit='KiB'>3145728</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
<boot dev='cdrom'/>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/boot2docker.iso'/>
<target dev='hdc' bus='scsi'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='2'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' io='threads'/>
<source file='/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/embed-certs-705938.rawdisk'/>
<target dev='hda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'/>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</controller>
<interface type='network'>
<mac address='52:54:00:54:db:56'/>
<source network='mk-embed-certs-705938'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</interface>
<interface type='network'>
<mac address='52:54:00:1e:e2:b0'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<audio id='1' type='none'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</rng>
</devices>
</domain>
I1102 13:46:01.577152 52806 main.go:143] libmachine: waiting for domain to start...
I1102 13:46:01.579225 52806 main.go:143] libmachine: domain is now running
I1102 13:46:01.579247 52806 main.go:143] libmachine: waiting for IP...
I1102 13:46:01.580381 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:01.581278 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:01.581301 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:01.581911 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:01.581961 52806 retry.go:31] will retry after 283.516454ms: waiting for domain to come up
I1102 13:46:01.867902 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:01.868662 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:01.868680 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:01.869097 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:01.869146 52806 retry.go:31] will retry after 322.823728ms: waiting for domain to come up
I1102 13:46:02.193585 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:02.194327 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:02.194359 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:02.194870 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:02.194913 52806 retry.go:31] will retry after 355.185879ms: waiting for domain to come up
I1102 13:46:02.551677 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:02.681737 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:02.681759 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:02.682464 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:02.682510 52806 retry.go:31] will retry after 460.738696ms: waiting for domain to come up
I1102 13:46:02.565781 52370 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1102 13:46:02.617258 52370 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:46:02.637998 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:02.806275 52370 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:46:02.834599 52370 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1102 13:46:02.854685 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:02.878203 52370 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:46:02.897523 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:03.087812 52370 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:46:03.114411 52370 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
I1102 13:46:03.130670 52370 retry.go:31] will retry after 1.304462067s: cri-docker.service not running
I1102 13:46:04.435472 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:04.456844 52370 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:46:04.472965 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:04.664589 52370 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:46:04.825530 52370 retry.go:31] will retry after 1.319662502s: cri-docker.service not running
I1102 13:46:06.145463 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:06.164945 52370 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1102 13:46:06.165021 52370 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1102 13:46:06.171435 52370 start.go:564] Will wait 60s for crictl version
I1102 13:46:06.171507 52370 ssh_runner.go:195] Run: which crictl
I1102 13:46:06.175734 52370 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1102 13:46:06.219786 52370 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1102 13:46:06.219864 52370 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1102 13:46:06.249106 52370 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1102 13:46:06.350551 52370 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1102 13:46:06.353328 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:06.353762 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:06.353789 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:06.354004 52370 ssh_runner.go:195] Run: grep 192.168.72.1 host.minikube.internal$ /etc/hosts
I1102 13:46:06.359460 52370 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.72.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:46:06.375911 52370 kubeadm.go:884] updating cluster {Name:no-preload-047294 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:no-preload-047294 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.63 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1102 13:46:06.376058 52370 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1102 13:46:06.376096 52370 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:46:06.398237 52370 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1102 13:46:06.398262 52370 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:46:06.398271 52370 kubeadm.go:935] updating node { 192.168.72.63 8443 v1.34.1 docker true true} ...
I1102 13:46:06.398392 52370 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=no-preload-047294 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.72.63
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:no-preload-047294 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1102 13:46:06.398467 52370 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1102 13:46:06.459190 52370 cni.go:84] Creating CNI manager for ""
I1102 13:46:06.459232 52370 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:46:06.459248 52370 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1102 13:46:06.459274 52370 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.72.63 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:no-preload-047294 NodeName:no-preload-047294 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.72.63"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.72.63 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1102 13:46:06.459465 52370 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.72.63
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "no-preload-047294"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.72.63"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.72.63"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I1102 13:46:06.459538 52370 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1102 13:46:06.472592 52370 binaries.go:44] Found k8s binaries, skipping transfer
I1102 13:46:06.472665 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1102 13:46:06.485059 52370 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I1102 13:46:06.505950 52370 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1102 13:46:06.526204 52370 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2221 bytes)
I1102 13:46:06.548166 52370 ssh_runner.go:195] Run: grep 192.168.72.63 control-plane.minikube.internal$ /etc/hosts
I1102 13:46:06.553556 52370 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.72.63	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:46:06.574072 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:06.740520 52370 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:46:06.778762 52370 certs.go:69] Setting up /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294 for IP: 192.168.72.63
I1102 13:46:06.778787 52370 certs.go:195] generating shared ca certs ...
I1102 13:46:06.778802 52370 certs.go:227] acquiring lock for ca certs: {Name:mk8ca472744959dc88f74e7c4ca834685146022e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:06.778979 52370 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key
I1102 13:46:06.779039 52370 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key
I1102 13:46:06.779054 52370 certs.go:257] generating profile certs ...
I1102 13:46:06.779158 52370 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/client.key
I1102 13:46:06.779228 52370 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/apiserver.key.8b0bc00d
I1102 13:46:06.779288 52370 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/proxy-client.key
I1102 13:46:06.779447 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:46:06.779492 52370 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:46:06.779510 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:46:06.779548 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:46:06.779581 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:46:06.779613 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:46:06.779673 52370 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:46:06.780429 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1102 13:46:06.826941 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1102 13:46:06.872412 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1102 13:46:06.919682 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1102 13:46:06.978960 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I1102 13:46:07.012810 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1102 13:46:07.048070 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1102 13:46:07.079490 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/no-preload-047294/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1102 13:46:07.116863 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:46:07.150279 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:46:07.192312 52370 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:46:07.229893 52370 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1102 13:46:07.250158 52370 ssh_runner.go:195] Run: openssl version
I1102 13:46:07.256701 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:46:07.270321 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:07.275945 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:07.275998 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:07.284035 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:46:07.296858 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:46:07.310533 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:46:07.317211 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:46:07.317285 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:46:07.324943 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:46:07.336779 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:46:07.350857 52370 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:46:07.357908 52370 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:46:07.357969 52370 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:46:07.368355 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:46:07.381794 52370 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1102 13:46:07.387640 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1102 13:46:07.395907 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1102 13:46:07.403326 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1102 13:46:07.412467 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1102 13:46:07.422486 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1102 13:46:07.430295 52370 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
	I1102 13:46:07.439746   52370 kubeadm.go:401] StartCluster: {Name:no-preload-047294 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34
                                                
                                                .1 ClusterName:no-preload-047294 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.63 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeR
                                                
                                                equested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1102 13:46:07.439918 52370 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:46:03.145374 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:03.146146 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:03.146162 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:03.146719 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:03.146754 52806 retry.go:31] will retry after 704.147192ms: waiting for domain to come up
I1102 13:46:03.852816 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:03.853888 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:03.853923 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:03.854438 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:03.854487 52806 retry.go:31] will retry after 839.707232ms: waiting for domain to come up
I1102 13:46:04.695645 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:04.696611 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:04.696632 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:04.697109 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:04.697146 52806 retry.go:31] will retry after 821.241975ms: waiting for domain to come up
I1102 13:46:05.520124 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:05.520894 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:05.520916 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:05.521369 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:05.521406 52806 retry.go:31] will retry after 1.267201306s: waiting for domain to come up
I1102 13:46:06.790524 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:06.791257 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:06.791276 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:06.791681 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:06.791721 52806 retry.go:31] will retry after 1.300732149s: waiting for domain to come up
I1102 13:46:07.468395 52370 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1102 13:46:07.483402 52370 kubeadm.go:417] found existing configuration files, will attempt cluster restart
I1102 13:46:07.483425 52370 kubeadm.go:598] restartPrimaryControlPlane start ...
I1102 13:46:07.483484 52370 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1102 13:46:07.497032 52370 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1102 13:46:07.497623 52370 kubeconfig.go:47] verify endpoint returned: get endpoint: "no-preload-047294" does not appear in /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:46:07.497875 52370 kubeconfig.go:62] /home/jenkins/minikube-integration/21808-9383/kubeconfig needs updating (will repair): [kubeconfig missing "no-preload-047294" cluster setting kubeconfig missing "no-preload-047294" context setting]
I1102 13:46:07.498285 52370 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:07.499579 52370 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1102 13:46:07.511419 52370 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.72.63
I1102 13:46:07.511456 52370 kubeadm.go:1161] stopping kube-system containers ...
I1102 13:46:07.511512 52370 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:46:07.533372 52370 docker.go:484] Stopping containers: [53aacf7a8c25 e2e83452259d caea875f38d2 3d285a4769ed 5c74e9de1020 7f0ca45f2366 47f3841a16f1 9ec2a9113b2f 71dc31d7068c 6b5f5b0d4dda 0d441b292a67 92c90b4f3e13 dc5e08e88d0a ae18505f9975 d663f497e9c9 d071178686a8 542226c154fb]
I1102 13:46:07.533460 52370 ssh_runner.go:195] Run: docker stop 53aacf7a8c25 e2e83452259d caea875f38d2 3d285a4769ed 5c74e9de1020 7f0ca45f2366 47f3841a16f1 9ec2a9113b2f 71dc31d7068c 6b5f5b0d4dda 0d441b292a67 92c90b4f3e13 dc5e08e88d0a ae18505f9975 d663f497e9c9 d071178686a8 542226c154fb
I1102 13:46:07.555045 52370 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I1102 13:46:07.575939 52370 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1102 13:46:07.588797 52370 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1102 13:46:07.588839 52370 kubeadm.go:158] found existing configuration files:
I1102 13:46:07.588903 52370 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1102 13:46:07.600185 52370 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1102 13:46:07.600253 52370 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1102 13:46:07.612049 52370 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1102 13:46:07.623377 52370 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1102 13:46:07.623439 52370 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1102 13:46:07.634967 52370 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1102 13:46:07.645964 52370 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1102 13:46:07.646028 52370 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1102 13:46:07.658165 52370 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1102 13:46:07.668751 52370 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1102 13:46:07.668819 52370 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1102 13:46:07.680242 52370 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1102 13:46:07.691767 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:07.839761 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:08.943799 52370 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.103994694s)
I1102 13:46:08.943882 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:09.179272 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:09.267190 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:09.336193 52370 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:46:09.336277 52370 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:46:09.836627 52370 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:46:10.337333 52370 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:46:10.837392 52370 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:46:10.911635 52370 api_server.go:72] duration metric: took 1.575444882s to wait for apiserver process to appear ...
I1102 13:46:10.911672 52370 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:46:10.911697 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:10.912267 52370 api_server.go:269] stopped: https://192.168.72.63:8443/healthz: Get "https://192.168.72.63:8443/healthz": dial tcp 192.168.72.63:8443: connect: connection refused
I1102 13:46:11.412006 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:08.093977 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:08.094746 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:08.094767 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:08.095264 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:08.095297 52806 retry.go:31] will retry after 1.468485697s: waiting for domain to come up
I1102 13:46:09.565558 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:09.566316 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:09.566333 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:09.566791 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:09.566827 52806 retry.go:31] will retry after 1.801199922s: waiting for domain to come up
I1102 13:46:11.370817 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:11.371787 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:11.371806 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:11.372220 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:11.372259 52806 retry.go:31] will retry after 2.844673s: waiting for domain to come up
I1102 13:46:13.451636 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                W1102 13:46:13.451675 52370 api_server.go:103] status: https://192.168.72.63:8443/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
                                                
                                                I1102 13:46:13.451695 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:13.549566 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 500:
[+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[-]poststarthook/start-apiextensions-controllers failed: reason withheld
[-]poststarthook/crd-informer-synced failed: reason withheld
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[-]poststarthook/start-service-ip-repair-controllers failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/priority-and-fairness-config-producer failed: reason withheld
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/start-kubernetes-service-cidr-controller failed: reason withheld
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[-]poststarthook/apiservice-registration-controller failed: reason withheld
[-]poststarthook/apiservice-discovery-controller failed: reason withheld
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:46:13.549608 52370 api_server.go:103] status: https://192.168.72.63:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[-]etcd failed: reason withheld
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[-]poststarthook/start-apiextensions-controllers failed: reason withheld
[-]poststarthook/crd-informer-synced failed: reason withheld
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[-]poststarthook/start-service-ip-repair-controllers failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/priority-and-fairness-config-producer failed: reason withheld
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/start-kubernetes-service-cidr-controller failed: reason withheld
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[-]poststarthook/apiservice-registration-controller failed: reason withheld
[-]poststarthook/apiservice-discovery-controller failed: reason withheld
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:46:13.912124 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:13.918453 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:46:13.918480 52370 api_server.go:103] status: https://192.168.72.63:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:46:14.412076 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:14.418219 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W1102 13:46:14.418244 52370 api_server.go:103] status: https://192.168.72.63:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-kubernetes-service-cidr-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I1102 13:46:14.911855 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:14.922266 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 200:
ok
I1102 13:46:14.949261 52370 api_server.go:141] control plane version: v1.34.1
I1102 13:46:14.949299 52370 api_server.go:131] duration metric: took 4.037618193s to wait for apiserver health ...
I1102 13:46:14.949311 52370 cni.go:84] Creating CNI manager for ""
I1102 13:46:14.949326 52370 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:46:14.950858 52370 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
I1102 13:46:14.952463 52370 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I1102 13:46:14.999278 52370 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I1102 13:46:15.054778 52370 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:46:15.064565 52370 system_pods.go:59] 8 kube-system pods found
I1102 13:46:15.064609 52370 system_pods.go:61] "coredns-66bc5c9577-th5tq" [65f5112e-1f3c-4f25-b91a-aa016db03acd] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:46:15.064619 52370 system_pods.go:61] "etcd-no-preload-047294" [150d4c86-f602-4a05-a8d3-6f54c6402abc] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:46:15.064627 52370 system_pods.go:61] "kube-apiserver-no-preload-047294" [6d35fe6a-5edf-4a16-a84f-fd8f527f48fe] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:46:15.064633 52370 system_pods.go:61] "kube-controller-manager-no-preload-047294" [888bc1fb-1c42-44a9-aa46-ba3e9ee49ee4] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:46:15.064639 52370 system_pods.go:61] "kube-proxy-nw5rx" [5dc5f78c-c165-402a-b834-f2f64e5ac4e2] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I1102 13:46:15.064644 52370 system_pods.go:61] "kube-scheduler-no-preload-047294" [af7ccc77-6a6b-4960-b6e4-63c69def3029] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:46:15.064652 52370 system_pods.go:61] "metrics-server-746fcd58dc-6pkxd" [65a57acb-e4e7-4fec-b299-0ae0667ed73a] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:46:15.064657 52370 system_pods.go:61] "storage-provisioner" [d6458b18-8300-444e-9e82-75cb7ba64d82] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I1102 13:46:15.064663 52370 system_pods.go:74] duration metric: took 9.863658ms to wait for pod list to return data ...
I1102 13:46:15.064676 52370 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:46:15.070973 52370 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:46:15.070998 52370 node_conditions.go:123] node cpu capacity is 2
I1102 13:46:15.071007 52370 node_conditions.go:105] duration metric: took 6.327227ms to run NodePressure ...
I1102 13:46:15.071052 52370 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I1102 13:46:15.456486 52370 kubeadm.go:729] waiting for restarted kubelet to initialise ...
I1102 13:46:15.468525 52370 kubeadm.go:744] kubelet initialised
I1102 13:46:15.468550 52370 kubeadm.go:745] duration metric: took 12.036942ms waiting for restarted kubelet to initialise ...
I1102 13:46:15.468569 52370 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1102 13:46:15.486589 52370 ops.go:34] apiserver oom_adj: -16
I1102 13:46:15.486615 52370 kubeadm.go:602] duration metric: took 8.003182012s to restartPrimaryControlPlane
I1102 13:46:15.486627 52370 kubeadm.go:403] duration metric: took 8.04689358s to StartCluster
I1102 13:46:15.486646 52370 settings.go:142] acquiring lock: {Name:mk2d74ff80d6e54b2738086ad41016418abd2f10 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:15.486716 52370 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21808-9383/kubeconfig
I1102 13:46:15.487838 52370 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/kubeconfig: {Name:mk95e08b031fa76046651ee45fd3a969ffc8e32e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:15.488115 52370 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.72.63 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I1102 13:46:15.488187 52370 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1102 13:46:15.488294 52370 addons.go:70] Setting storage-provisioner=true in profile "no-preload-047294"
I1102 13:46:15.488315 52370 addons.go:239] Setting addon storage-provisioner=true in "no-preload-047294"
W1102 13:46:15.488327 52370 addons.go:248] addon storage-provisioner should already be in state true
I1102 13:46:15.488333 52370 addons.go:70] Setting default-storageclass=true in profile "no-preload-047294"
I1102 13:46:15.488362 52370 config.go:182] Loaded profile config "no-preload-047294": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:46:15.488369 52370 addons.go:70] Setting metrics-server=true in profile "no-preload-047294"
I1102 13:46:15.488375 52370 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "no-preload-047294"
I1102 13:46:15.488381 52370 addons.go:239] Setting addon metrics-server=true in "no-preload-047294"
W1102 13:46:15.488389 52370 addons.go:248] addon metrics-server should already be in state true
I1102 13:46:15.488404 52370 host.go:66] Checking if "no-preload-047294" exists ...
I1102 13:46:15.488430 52370 cache.go:107] acquiring lock: {Name:mkfde24ce23f92e3eaf637254ed5ac4355c07159 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1102 13:46:15.488498 52370 cache.go:115] /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 exists
I1102 13:46:15.488514 52370 cache.go:96] cache image "gcr.io/k8s-minikube/gvisor-addon:2" -> "/home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2" took 90.577µs
I1102 13:46:15.488523 52370 cache.go:80] save to tar file gcr.io/k8s-minikube/gvisor-addon:2 -> /home/jenkins/minikube-integration/21808-9383/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 succeeded
I1102 13:46:15.488531 52370 cache.go:87] Successfully saved all images to host disk.
I1102 13:46:15.488517 52370 addons.go:70] Setting dashboard=true in profile "no-preload-047294"
I1102 13:46:15.488618 52370 addons.go:239] Setting addon dashboard=true in "no-preload-047294"
W1102 13:46:15.488641 52370 addons.go:248] addon dashboard should already be in state true
I1102 13:46:15.488695 52370 config.go:182] Loaded profile config "no-preload-047294": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:46:15.488723 52370 host.go:66] Checking if "no-preload-047294" exists ...
I1102 13:46:15.488364 52370 host.go:66] Checking if "no-preload-047294" exists ...
I1102 13:46:15.490928 52370 out.go:179] * Verifying Kubernetes components...
I1102 13:46:15.491588 52370 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:46:15.492641 52370 addons.go:239] Setting addon default-storageclass=true in "no-preload-047294"
W1102 13:46:15.492661 52370 addons.go:248] addon default-storageclass should already be in state true
I1102 13:46:15.492683 52370 host.go:66] Checking if "no-preload-047294" exists ...
I1102 13:46:15.493697 52370 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:15.494462 52370 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
I1102 13:46:15.494479 52370 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1102 13:46:15.494792 52370 out.go:179] - Using image fake.domain/registry.k8s.io/echoserver:1.4
I1102 13:46:15.494801 52370 out.go:179] - Using image docker.io/kubernetesui/dashboard:v2.7.0
I1102 13:46:15.494811 52370 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1102 13:46:15.495005 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.495472 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:15.495505 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.495662 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:46:15.496015 52370 addons.go:436] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1102 13:46:15.496033 52370 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1102 13:46:15.496037 52370 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:46:15.496051 52370 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1102 13:46:15.497316 52370 out.go:179] - Using image registry.k8s.io/echoserver:1.4
I1102 13:46:15.497671 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.498541 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-ns.yaml
I1102 13:46:15.498557 52370 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
I1102 13:46:15.498589 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:15.498622 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.498859 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:46:15.499924 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.499938 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.500698 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:15.500729 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.500777 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:15.500812 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.500904 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:46:15.501148 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:46:15.501880 52370 main.go:143] libmachine: domain no-preload-047294 has defined MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.502298 52370 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:26:8d:fe", ip: ""} in network mk-no-preload-047294: {Iface:virbr4 ExpiryTime:2025-11-02 14:45:52 +0000 UTC Type:0 Mac:52:54:00:26:8d:fe Iaid: IPaddr:192.168.72.63 Prefix:24 Hostname:no-preload-047294 Clientid:01:52:54:00:26:8d:fe}
I1102 13:46:15.502323 52370 main.go:143] libmachine: domain no-preload-047294 has defined IP address 192.168.72.63 and MAC address 52:54:00:26:8d:fe in network mk-no-preload-047294
I1102 13:46:15.502486 52370 sshutil.go:53] new ssh client: &{IP:192.168.72.63 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/no-preload-047294/id_rsa Username:docker}
I1102 13:46:15.819044 52370 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:46:15.845525 52370 node_ready.go:35] waiting up to 6m0s for node "no-preload-047294" to be "Ready" ...
I1102 13:46:15.915566 52370 docker.go:691] Got preloaded images: -- stdout --
gcr.io/k8s-minikube/gvisor-addon:2
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
-- /stdout --
I1102 13:46:15.915591 52370 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:46:15.915598 52370 cache_images.go:264] succeeded pushing to: no-preload-047294
I1102 13:46:15.944858 52370 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1102 13:46:16.021389 52370 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1102 13:46:16.052080 52370 addons.go:436] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1102 13:46:16.052111 52370 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
I1102 13:46:16.090741 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
I1102 13:46:16.090772 52370 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
I1102 13:46:16.196294 52370 addons.go:436] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1102 13:46:16.196322 52370 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1102 13:46:16.224986 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
I1102 13:46:16.225016 52370 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
I1102 13:46:16.286042 52370 addons.go:436] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:46:16.286065 52370 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1102 13:46:16.307552 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-configmap.yaml
I1102 13:46:16.307586 52370 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
I1102 13:46:16.366336 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-dp.yaml
I1102 13:46:16.366388 52370 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
I1102 13:46:16.379015 52370 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1102 13:46:16.471900 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-role.yaml
I1102 13:46:16.471927 52370 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
I1102 13:46:16.569682 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
I1102 13:46:16.569711 52370 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
I1102 13:46:16.657567 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-sa.yaml
I1102 13:46:16.657596 52370 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
I1102 13:46:16.756983 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-secret.yaml
I1102 13:46:16.757028 52370 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
I1102 13:46:16.809899 52370 addons.go:436] installing /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:46:16.809928 52370 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
I1102 13:46:16.871393 52370 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
I1102 13:46:14.218884 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:14.219557 52806 main.go:143] libmachine: no network interface addresses found for domain embed-certs-705938 (source=lease)
I1102 13:46:14.219571 52806 main.go:143] libmachine: trying to list again with source=arp
I1102 13:46:14.219887 52806 main.go:143] libmachine: unable to find current IP address of domain embed-certs-705938 in network mk-embed-certs-705938 (interfaces detected: [])
I1102 13:46:14.219923 52806 retry.go:31] will retry after 2.93345401s: waiting for domain to come up
I1102 13:46:17.155807 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.156857 52806 main.go:143] libmachine: domain embed-certs-705938 has current primary IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.156885 52806 main.go:143] libmachine: found domain IP: 192.168.50.135
I1102 13:46:17.156899 52806 main.go:143] libmachine: reserving static IP address...
I1102 13:46:17.157383 52806 main.go:143] libmachine: unable to find host DHCP lease matching {name: "embed-certs-705938", mac: "52:54:00:54:db:56", ip: "192.168.50.135"} in network mk-embed-certs-705938
I1102 13:46:17.415133 52806 main.go:143] libmachine: reserved static IP address 192.168.50.135 for domain embed-certs-705938
I1102 13:46:17.415181 52806 main.go:143] libmachine: waiting for SSH...
I1102 13:46:17.415189 52806 main.go:143] libmachine: Getting to WaitForSSH function...
I1102 13:46:17.418689 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.419255 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:minikube Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.419298 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.419531 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:17.419883 52806 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:17.419903 52806 main.go:143] libmachine: About to run SSH command:
exit 0
I1102 13:46:17.534488 52806 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:46:17.534961 52806 main.go:143] libmachine: domain creation complete
I1102 13:46:17.536764 52806 machine.go:94] provisionDockerMachine start ...
I1102 13:46:17.539495 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.539992 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.540028 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.540252 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:17.540560 52806 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:17.540581 52806 main.go:143] libmachine: About to run SSH command:
hostname
I1102 13:46:17.652272 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
I1102 13:46:17.652300 52806 buildroot.go:166] provisioning hostname "embed-certs-705938"
I1102 13:46:17.655559 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.656028 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.656056 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.656296 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:17.656546 52806 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:17.656563 52806 main.go:143] libmachine: About to run SSH command:
sudo hostname embed-certs-705938 && echo "embed-certs-705938" | sudo tee /etc/hostname
I1102 13:46:17.787544 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: embed-certs-705938
I1102 13:46:17.790765 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.791180 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.791204 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.791416 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:17.791651 52806 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:17.791669 52806 main.go:143] libmachine: About to run SSH command:
if ! grep -xq '.*\sembed-certs-705938' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 embed-certs-705938/g' /etc/hosts;
else
echo '127.0.1.1 embed-certs-705938' | sudo tee -a /etc/hosts;
fi
fi
W1102 13:46:17.850464 52370 node_ready.go:57] node "no-preload-047294" has "Ready":"False" status (will retry)
I1102 13:46:18.162216 52370 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (2.140782403s)
I1102 13:46:18.162218 52370 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (1.783147201s)
I1102 13:46:18.162277 52370 addons.go:480] Verifying addon metrics-server=true in "no-preload-047294"
I1102 13:46:18.382893 52370 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (1.511434576s)
I1102 13:46:18.384606 52370 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:
minikube -p no-preload-047294 addons enable metrics-server
I1102 13:46:18.386338 52370 out.go:179] * Enabled addons: default-storageclass, storage-provisioner, metrics-server, dashboard
I1102 13:46:18.387422 52370 addons.go:515] duration metric: took 2.899233176s for enable addons: enabled=[default-storageclass storage-provisioner metrics-server dashboard]
I1102 13:46:19.849552 52370 node_ready.go:49] node "no-preload-047294" is "Ready"
I1102 13:46:19.849579 52370 node_ready.go:38] duration metric: took 4.004012408s for node "no-preload-047294" to be "Ready" ...
I1102 13:46:19.849594 52370 api_server.go:52] waiting for apiserver process to appear ...
I1102 13:46:19.849637 52370 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1102 13:46:19.896495 52370 api_server.go:72] duration metric: took 4.408344456s to wait for apiserver process to appear ...
I1102 13:46:19.896524 52370 api_server.go:88] waiting for apiserver healthz status ...
I1102 13:46:19.896551 52370 api_server.go:253] Checking apiserver healthz at https://192.168.72.63:8443/healthz ...
I1102 13:46:19.907446 52370 api_server.go:279] https://192.168.72.63:8443/healthz returned 200:
ok
I1102 13:46:19.909066 52370 api_server.go:141] control plane version: v1.34.1
I1102 13:46:19.909101 52370 api_server.go:131] duration metric: took 12.568216ms to wait for apiserver health ...
I1102 13:46:19.909111 52370 system_pods.go:43] waiting for kube-system pods to appear ...
I1102 13:46:19.920317 52370 system_pods.go:59] 8 kube-system pods found
I1102 13:46:19.920358 52370 system_pods.go:61] "coredns-66bc5c9577-th5tq" [65f5112e-1f3c-4f25-b91a-aa016db03acd] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:46:19.920365 52370 system_pods.go:61] "etcd-no-preload-047294" [150d4c86-f602-4a05-a8d3-6f54c6402abc] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:46:19.920375 52370 system_pods.go:61] "kube-apiserver-no-preload-047294" [6d35fe6a-5edf-4a16-a84f-fd8f527f48fe] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:46:19.920380 52370 system_pods.go:61] "kube-controller-manager-no-preload-047294" [888bc1fb-1c42-44a9-aa46-ba3e9ee49ee4] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:46:19.920389 52370 system_pods.go:61] "kube-proxy-nw5rx" [5dc5f78c-c165-402a-b834-f2f64e5ac4e2] Running
I1102 13:46:19.920395 52370 system_pods.go:61] "kube-scheduler-no-preload-047294" [af7ccc77-6a6b-4960-b6e4-63c69def3029] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:46:19.920400 52370 system_pods.go:61] "metrics-server-746fcd58dc-6pkxd" [65a57acb-e4e7-4fec-b299-0ae0667ed73a] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:46:19.920405 52370 system_pods.go:61] "storage-provisioner" [d6458b18-8300-444e-9e82-75cb7ba64d82] Running
I1102 13:46:19.920412 52370 system_pods.go:74] duration metric: took 11.217536ms to wait for pod list to return data ...
I1102 13:46:19.920422 52370 default_sa.go:34] waiting for default service account to be created ...
I1102 13:46:19.924360 52370 default_sa.go:45] found service account: "default"
I1102 13:46:19.924386 52370 default_sa.go:55] duration metric: took 3.955386ms for default service account to be created ...
I1102 13:46:19.924397 52370 system_pods.go:116] waiting for k8s-apps to be running ...
I1102 13:46:19.927824 52370 system_pods.go:86] 8 kube-system pods found
I1102 13:46:19.927857 52370 system_pods.go:89] "coredns-66bc5c9577-th5tq" [65f5112e-1f3c-4f25-b91a-aa016db03acd] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I1102 13:46:19.927868 52370 system_pods.go:89] "etcd-no-preload-047294" [150d4c86-f602-4a05-a8d3-6f54c6402abc] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I1102 13:46:19.927881 52370 system_pods.go:89] "kube-apiserver-no-preload-047294" [6d35fe6a-5edf-4a16-a84f-fd8f527f48fe] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I1102 13:46:19.927894 52370 system_pods.go:89] "kube-controller-manager-no-preload-047294" [888bc1fb-1c42-44a9-aa46-ba3e9ee49ee4] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I1102 13:46:19.927904 52370 system_pods.go:89] "kube-proxy-nw5rx" [5dc5f78c-c165-402a-b834-f2f64e5ac4e2] Running
I1102 13:46:19.927914 52370 system_pods.go:89] "kube-scheduler-no-preload-047294" [af7ccc77-6a6b-4960-b6e4-63c69def3029] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I1102 13:46:19.927924 52370 system_pods.go:89] "metrics-server-746fcd58dc-6pkxd" [65a57acb-e4e7-4fec-b299-0ae0667ed73a] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I1102 13:46:19.927933 52370 system_pods.go:89] "storage-provisioner" [d6458b18-8300-444e-9e82-75cb7ba64d82] Running
I1102 13:46:19.927943 52370 system_pods.go:126] duration metric: took 3.538982ms to wait for k8s-apps to be running ...
I1102 13:46:19.927955 52370 system_svc.go:44] waiting for kubelet service to be running ....
I1102 13:46:19.928014 52370 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1102 13:46:19.954056 52370 system_svc.go:56] duration metric: took 26.090682ms WaitForService to wait for kubelet
I1102 13:46:19.954091 52370 kubeadm.go:587] duration metric: took 4.465942504s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1102 13:46:19.954114 52370 node_conditions.go:102] verifying NodePressure condition ...
I1102 13:46:19.957563 52370 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I1102 13:46:19.957595 52370 node_conditions.go:123] node cpu capacity is 2
I1102 13:46:19.957612 52370 node_conditions.go:105] duration metric: took 3.491442ms to run NodePressure ...
I1102 13:46:19.957627 52370 start.go:242] waiting for startup goroutines ...
I1102 13:46:19.957643 52370 start.go:247] waiting for cluster config update ...
I1102 13:46:19.957658 52370 start.go:256] writing updated cluster config ...
I1102 13:46:19.957996 52370 ssh_runner.go:195] Run: rm -f paused
I1102 13:46:19.963044 52370 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1102 13:46:19.966820 52370 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-th5tq" in "kube-system" namespace to be "Ready" or be gone ...
W1102 13:46:21.973445 52370 pod_ready.go:104] pod "coredns-66bc5c9577-th5tq" is not "Ready", error: <nil>
I1102 13:46:17.916169 52806 main.go:143] libmachine: SSH cmd err, output: <nil>:
I1102 13:46:17.916200 52806 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21808-9383/.minikube CaCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21808-9383/.minikube}
I1102 13:46:17.916222 52806 buildroot.go:174] setting up certificates
I1102 13:46:17.916235 52806 provision.go:84] configureAuth start
I1102 13:46:17.919973 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.920554 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.920609 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.923643 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.924156 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.924187 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.924321 52806 provision.go:143] copyHostCerts
I1102 13:46:17.924394 52806 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem, removing ...
I1102 13:46:17.924416 52806 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem
I1102 13:46:17.924496 52806 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/cert.pem (1123 bytes)
I1102 13:46:17.924606 52806 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem, removing ...
I1102 13:46:17.924617 52806 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem
I1102 13:46:17.924654 52806 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/key.pem (1675 bytes)
I1102 13:46:17.924721 52806 exec_runner.go:144] found /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem, removing ...
I1102 13:46:17.924729 52806 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem
I1102 13:46:17.924760 52806 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21808-9383/.minikube/ca.pem (1082 bytes)
I1102 13:46:17.924820 52806 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem org=jenkins.embed-certs-705938 san=[127.0.0.1 192.168.50.135 embed-certs-705938 localhost minikube]
I1102 13:46:17.990201 52806 provision.go:177] copyRemoteCerts
I1102 13:46:17.990258 52806 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1102 13:46:17.993216 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.993704 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:17.993740 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:17.993903 52806 sshutil.go:53] new ssh client: &{IP:192.168.50.135 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/id_rsa Username:docker}
I1102 13:46:18.081294 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1102 13:46:18.121653 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
I1102 13:46:18.171219 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1102 13:46:18.212624 52806 provision.go:87] duration metric: took 296.37323ms to configureAuth
I1102 13:46:18.212658 52806 buildroot.go:189] setting minikube options for container-runtime
I1102 13:46:18.212891 52806 config.go:182] Loaded profile config "embed-certs-705938": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1102 13:46:18.216296 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.216768 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:18.216797 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.217050 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:18.217275 52806 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:18.217289 52806 main.go:143] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1102 13:46:18.332652 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
I1102 13:46:18.332676 52806 buildroot.go:70] root file system type: tmpfs
I1102 13:46:18.332790 52806 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1102 13:46:18.336170 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.336665 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:18.336698 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.336965 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:18.337216 52806 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:18.337304 52806 main.go:143] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1102 13:46:18.469436 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I1102 13:46:18.472332 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.472755 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:18.472785 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:18.472963 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:18.473174 52806 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:18.473190 52806 main.go:143] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1102 13:46:19.439525 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
I1102 13:46:19.439552 52806 machine.go:97] duration metric: took 1.902770314s to provisionDockerMachine
I1102 13:46:19.439563 52806 client.go:176] duration metric: took 19.960023s to LocalClient.Create
I1102 13:46:19.439582 52806 start.go:167] duration metric: took 19.960095763s to libmachine.API.Create "embed-certs-705938"
I1102 13:46:19.439592 52806 start.go:293] postStartSetup for "embed-certs-705938" (driver="kvm2")
I1102 13:46:19.439604 52806 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1102 13:46:19.439663 52806 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1102 13:46:19.443408 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.445129 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:19.445175 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.445388 52806 sshutil.go:53] new ssh client: &{IP:192.168.50.135 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/id_rsa Username:docker}
I1102 13:46:19.536215 52806 ssh_runner.go:195] Run: cat /etc/os-release
I1102 13:46:19.541490 52806 info.go:137] Remote host: Buildroot 2025.02
I1102 13:46:19.541514 52806 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/addons for local assets ...
I1102 13:46:19.541571 52806 filesync.go:126] Scanning /home/jenkins/minikube-integration/21808-9383/.minikube/files for local assets ...
I1102 13:46:19.541640 52806 filesync.go:149] local asset: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem -> 132702.pem in /etc/ssl/certs
I1102 13:46:19.541722 52806 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1102 13:46:19.553427 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:46:19.583514 52806 start.go:296] duration metric: took 143.906312ms for postStartSetup
I1102 13:46:19.586330 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.586696 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:19.586720 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.586940 52806 profile.go:143] Saving config to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/config.json ...
I1102 13:46:19.587159 52806 start.go:128] duration metric: took 20.109926215s to createHost
I1102 13:46:19.589294 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.589616 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:19.589639 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.589812 52806 main.go:143] libmachine: Using SSH client type: native
I1102 13:46:19.590068 52806 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x841760] 0x844460 <nil>  [] 0s} 192.168.50.135 22 <nil> <nil>}
I1102 13:46:19.590081 52806 main.go:143] libmachine: About to run SSH command:
date +%s.%N
I1102 13:46:19.697000 52806 main.go:143] libmachine: SSH cmd err, output: <nil>: 1762091179.664541650
I1102 13:46:19.697035 52806 fix.go:216] guest clock: 1762091179.664541650
I1102 13:46:19.697045 52806 fix.go:229] Guest: 2025-11-02 13:46:19.66454165 +0000 UTC Remote: 2025-11-02 13:46:19.587172147 +0000 UTC m=+51.820715414 (delta=77.369503ms)
I1102 13:46:19.697069 52806 fix.go:200] guest clock delta is within tolerance: 77.369503ms
I1102 13:46:19.697075 52806 start.go:83] releasing machines lock for "embed-certs-705938", held for 20.220009213s
I1102 13:46:19.700081 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.700529 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:19.700564 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.700817 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:46:19.700870 52806 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:46:19.700882 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:46:19.700918 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:46:19.700956 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:46:19.700992 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:46:19.701052 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:46:19.701138 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:46:19.703264 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.703658 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:19.703684 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:19.703829 52806 sshutil.go:53] new ssh client: &{IP:192.168.50.135 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21808-9383/.minikube/machines/embed-certs-705938/id_rsa Username:docker}
I1102 13:46:19.809921 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:46:19.839404 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:46:19.869848 52806 ssh_runner.go:195] Run: openssl version
I1102 13:46:19.880751 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:46:19.898132 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:19.905270 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:19.905327 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:19.914387 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:46:19.928810 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:46:19.942549 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:46:19.947838 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:46:19.947903 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:46:19.956624 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:46:19.971483 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:46:19.986416 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:46:19.991713 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:46:19.991778 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:46:20.000084 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:46:20.014637 52806 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-certificates >/dev/null 2>&1 && sudo update-ca-certificates || true"
I1102 13:46:20.019384 52806 ssh_runner.go:195] Run: /bin/sh -c "command -v update-ca-trust >/dev/null 2>&1 && sudo update-ca-trust extract || true"
I1102 13:46:20.024376 52806 ssh_runner.go:195] Run: cat /version.json
I1102 13:46:20.024475 52806 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1102 13:46:20.037036 52806 ssh_runner.go:195] Run: systemctl --version
I1102 13:46:20.059823 52806 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W1102 13:46:20.066621 52806 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I1102 13:46:20.066689 52806 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1102 13:46:20.091927 52806 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I1102 13:46:20.091955 52806 start.go:496] detecting cgroup driver to use...
I1102 13:46:20.092085 52806 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:46:20.118748 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I1102 13:46:20.133743 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1102 13:46:20.149619 52806 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1102 13:46:20.149693 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1102 13:46:20.164558 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:46:20.184822 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1102 13:46:20.201265 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1102 13:46:20.220488 52806 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1102 13:46:20.250102 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1102 13:46:20.278612 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1102 13:46:20.309477 52806 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1102 13:46:20.327666 52806 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1102 13:46:20.340974 52806 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1102 13:46:20.341039 52806 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1102 13:46:20.354897 52806 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1102 13:46:20.369758 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:20.554281 52806 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1102 13:46:20.607028 52806 start.go:496] detecting cgroup driver to use...
I1102 13:46:20.607123 52806 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1102 13:46:20.628869 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:46:20.647644 52806 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1102 13:46:20.669638 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1102 13:46:20.687700 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:46:20.705265 52806 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I1102 13:46:20.744370 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1102 13:46:20.763516 52806 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1102 13:46:20.788424 52806 ssh_runner.go:195] Run: which cri-dockerd
I1102 13:46:20.793415 52806 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1102 13:46:20.810121 52806 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I1102 13:46:20.832924 52806 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1102 13:46:20.998814 52806 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1102 13:46:21.155552 52806 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
I1102 13:46:21.155678 52806 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1102 13:46:21.177952 52806 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1102 13:46:21.193864 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:21.345292 52806 ssh_runner.go:195] Run: sudo systemctl restart docker
I1102 13:46:21.823497 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1102 13:46:21.839482 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1102 13:46:21.855805 52806 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I1102 13:46:21.875591 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:21.891152 52806 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1102 13:46:22.040770 52806 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1102 13:46:22.187323 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:22.362141 52806 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1102 13:46:22.413868 52806 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I1102 13:46:22.429826 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:22.603137 52806 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1102 13:46:22.723299 52806 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1102 13:46:22.746135 52806 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1102 13:46:22.746228 52806 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1102 13:46:22.755329 52806 start.go:564] Will wait 60s for crictl version
I1102 13:46:22.755401 52806 ssh_runner.go:195] Run: which crictl
I1102 13:46:22.761309 52806 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1102 13:46:22.817847 52806 start.go:580] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.5.1
RuntimeApiVersion: v1
I1102 13:46:22.817936 52806 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1102 13:46:22.848957 52806 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
W1102 13:46:23.976265 52370 pod_ready.go:104] pod "coredns-66bc5c9577-th5tq" is not "Ready", error: <nil>
I1102 13:46:24.975676 52370 pod_ready.go:94] pod "coredns-66bc5c9577-th5tq" is "Ready"
I1102 13:46:24.975717 52370 pod_ready.go:86] duration metric: took 5.00887023s for pod "coredns-66bc5c9577-th5tq" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:24.979834 52370 pod_ready.go:83] waiting for pod "etcd-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:24.995163 52370 pod_ready.go:94] pod "etcd-no-preload-047294" is "Ready"
I1102 13:46:24.995215 52370 pod_ready.go:86] duration metric: took 15.356364ms for pod "etcd-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:25.003843 52370 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
W1102 13:46:27.014037 52370 pod_ready.go:104] pod "kube-apiserver-no-preload-047294" is not "Ready", error: <nil>
I1102 13:46:22.882470 52806 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
I1102 13:46:22.886180 52806 main.go:143] libmachine: domain embed-certs-705938 has defined MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:22.886769 52806 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:54:db:56", ip: ""} in network mk-embed-certs-705938: {Iface:virbr2 ExpiryTime:2025-11-02 14:46:15 +0000 UTC Type:0 Mac:52:54:00:54:db:56 Iaid: IPaddr:192.168.50.135 Prefix:24 Hostname:embed-certs-705938 Clientid:01:52:54:00:54:db:56}
I1102 13:46:22.886809 52806 main.go:143] libmachine: domain embed-certs-705938 has defined IP address 192.168.50.135 and MAC address 52:54:00:54:db:56 in network mk-embed-certs-705938
I1102 13:46:22.887103 52806 ssh_runner.go:195] Run: grep 192.168.50.1 host.minikube.internal$ /etc/hosts
I1102 13:46:22.892228 52806 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.50.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:46:22.908232 52806 kubeadm.go:884] updating cluster {Name:embed-certs-705938 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-705938 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.50.135 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1102 13:46:22.908443 52806 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1102 13:46:22.908529 52806 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:46:22.931739 52806 docker.go:691] Got preloaded images:
I1102 13:46:22.931772 52806 docker.go:697] registry.k8s.io/kube-apiserver:v1.34.1 wasn't preloaded
I1102 13:46:22.931831 52806 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
I1102 13:46:22.944369 52806 ssh_runner.go:195] Run: which lz4
I1102 13:46:22.949025 52806 ssh_runner.go:195] Run: stat -c "%s %y" /preloaded.tar.lz4
I1102 13:46:22.954479 52806 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%s %y" /preloaded.tar.lz4: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/preloaded.tar.lz4': No such file or directory
I1102 13:46:22.954523 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 --> /preloaded.tar.lz4 (353378914 bytes)
I1102 13:46:24.467441 52806 docker.go:655] duration metric: took 1.518464633s to copy over tarball
I1102 13:46:24.467523 52806 ssh_runner.go:195] Run: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4
I1102 13:46:25.935982 52806 ssh_runner.go:235] Completed: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4: (1.468432911s)
I1102 13:46:25.936019 52806 ssh_runner.go:146] rm: /preloaded.tar.lz4
I1102 13:46:25.973464 52806 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
I1102 13:46:25.985858 52806 ssh_runner.go:362] scp memory --> /var/lib/docker/image/overlay2/repositories.json (2632 bytes)
I1102 13:46:26.006812 52806 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I1102 13:46:26.021974 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:26.196874 52806 ssh_runner.go:195] Run: sudo systemctl restart docker
I1102 13:46:29.016780 52370 pod_ready.go:94] pod "kube-apiserver-no-preload-047294" is "Ready"
I1102 13:46:29.016820 52370 pod_ready.go:86] duration metric: took 4.01294547s for pod "kube-apiserver-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.021107 52370 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.027163 52370 pod_ready.go:94] pod "kube-controller-manager-no-preload-047294" is "Ready"
I1102 13:46:29.027191 52370 pod_ready.go:86] duration metric: took 6.0502ms for pod "kube-controller-manager-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.030257 52370 pod_ready.go:83] waiting for pod "kube-proxy-nw5rx" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.035459 52370 pod_ready.go:94] pod "kube-proxy-nw5rx" is "Ready"
I1102 13:46:29.035493 52370 pod_ready.go:86] duration metric: took 5.206198ms for pod "kube-proxy-nw5rx" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.039535 52370 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.238466 52370 pod_ready.go:94] pod "kube-scheduler-no-preload-047294" is "Ready"
I1102 13:46:29.238500 52370 pod_ready.go:86] duration metric: took 198.942184ms for pod "kube-scheduler-no-preload-047294" in "kube-system" namespace to be "Ready" or be gone ...
I1102 13:46:29.238516 52370 pod_ready.go:40] duration metric: took 9.275441602s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I1102 13:46:29.299272 52370 start.go:628] kubectl: 1.34.1, cluster: 1.34.1 (minor skew: 0)
I1102 13:46:29.301171 52370 out.go:179] * Done! kubectl is now configured to use "no-preload-047294" cluster and "default" namespace by default
I1102 13:46:29.244892 52806 ssh_runner.go:235] Completed: sudo systemctl restart docker: (3.04797346s)
I1102 13:46:29.244991 52806 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1102 13:46:29.271075 52806 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I1102 13:46:29.271116 52806 cache_images.go:86] Images are preloaded, skipping loading
I1102 13:46:29.271130 52806 kubeadm.go:935] updating node { 192.168.50.135 8443 v1.34.1 docker true true} ...
I1102 13:46:29.271241 52806 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=embed-certs-705938 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.50.135
[Install]
config:
{KubernetesVersion:v1.34.1 ClusterName:embed-certs-705938 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1102 13:46:29.271306 52806 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
                                                I1102 13:46:29.330903 52806 cni.go:84] Creating CNI manager for ""
I1102 13:46:29.330935 52806 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I1102 13:46:29.330949 52806 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I1102 13:46:29.330975 52806 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.50.135 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:embed-certs-705938 NodeName:embed-certs-705938 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.50.135"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.50.135 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1102 13:46:29.331165 52806 kubeadm.go:196] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.50.135
  bindPort: 8443
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "embed-certs-705938"
  kubeletExtraArgs:
  - name: "node-ip"
    value: "192.168.50.135"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.50.135"]
  extraArgs:
  - name: "enable-admission-plugins"
    value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
  - name: "allocate-node-cidrs"
    value: "true"
  - name: "leader-elect"
    value: "false"
scheduler:
  extraArgs:
  - name: "leader-elect"
    value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I1102 13:46:29.331238 52806 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
I1102 13:46:29.345091 52806 binaries.go:44] Found k8s binaries, skipping transfer
I1102 13:46:29.345162 52806 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1102 13:46:29.358857 52806 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (319 bytes)
I1102 13:46:29.382829 52806 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1102 13:46:29.407569 52806 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2225 bytes)
I1102 13:46:29.435296 52806 ssh_runner.go:195] Run: grep 192.168.50.135 control-plane.minikube.internal$ /etc/hosts
I1102 13:46:29.439901 52806 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.50.135	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1102 13:46:29.455934 52806 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1102 13:46:29.619190 52806 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1102 13:46:29.653107 52806 certs.go:69] Setting up /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938 for IP: 192.168.50.135
I1102 13:46:29.653135 52806 certs.go:195] generating shared ca certs ...
I1102 13:46:29.653158 52806 certs.go:227] acquiring lock for ca certs: {Name:mk8ca472744959dc88f74e7c4ca834685146022e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:29.653386 52806 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key
I1102 13:46:29.653455 52806 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key
I1102 13:46:29.653470 52806 certs.go:257] generating profile certs ...
I1102 13:46:29.653543 52806 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.key
I1102 13:46:29.653561 52806 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.crt with IP's: []
I1102 13:46:29.893751 52806 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.crt ...
I1102 13:46:29.893784 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.crt: {Name:mk6a7ff5531c8d47f764b9b3f8a2b9684864662f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:29.893948 52806 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.key ...
I1102 13:46:29.893961 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/client.key: {Name:mk3e34eea0dea3b01b1d79fb7864361caeccf43b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:29.894037 52806 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key.ed6fd5b9
I1102 13:46:29.894054 52806 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt.ed6fd5b9 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.50.135]
I1102 13:46:30.525555 52806 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt.ed6fd5b9 ...
I1102 13:46:30.525583 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt.ed6fd5b9: {Name:mkb6d8b5233847de816b3d65ee8c7a12eff19517 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:30.525775 52806 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key.ed6fd5b9 ...
I1102 13:46:30.525797 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key.ed6fd5b9: {Name:mk20f67c0c009e78f2c3037ed1603d1fad28fffa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:30.525890 52806 certs.go:382] copying /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt.ed6fd5b9 -> /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt
I1102 13:46:30.525957 52806 certs.go:386] copying /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key.ed6fd5b9 -> /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key
I1102 13:46:30.526012 52806 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.key
I1102 13:46:30.526027 52806 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.crt with IP's: []
I1102 13:46:30.638271 52806 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.crt ...
I1102 13:46:30.638301 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.crt: {Name:mk3dac91a58b003a9db0fc034b457a850bea98c4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:30.638492 52806 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.key ...
I1102 13:46:30.638511 52806 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.key: {Name:mkf0dc7f3c198ddf7eee23643f3e8cc58cf6505a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1102 13:46:30.638773 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem (1338 bytes)
W1102 13:46:30.638826 52806 certs.go:480] ignoring /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270_empty.pem, impossibly tiny 0 bytes
I1102 13:46:30.638838 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca-key.pem (1675 bytes)
I1102 13:46:30.638859 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/ca.pem (1082 bytes)
I1102 13:46:30.638881 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/cert.pem (1123 bytes)
I1102 13:46:30.638901 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/certs/key.pem (1675 bytes)
I1102 13:46:30.638939 52806 certs.go:484] found cert: /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem (1708 bytes)
I1102 13:46:30.639519 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1102 13:46:30.674138 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1102 13:46:30.703977 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1102 13:46:30.734195 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I1102 13:46:30.764670 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1428 bytes)
I1102 13:46:30.794535 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1102 13:46:30.825092 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1102 13:46:30.856377 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/profiles/embed-certs-705938/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I1102 13:46:30.887933 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1102 13:46:30.919173 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/certs/13270.pem --> /usr/share/ca-certificates/13270.pem (1338 bytes)
I1102 13:46:30.952120 52806 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21808-9383/.minikube/files/etc/ssl/certs/132702.pem --> /usr/share/ca-certificates/132702.pem (1708 bytes)
I1102 13:46:30.984088 52806 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1102 13:46:31.005017 52806 ssh_runner.go:195] Run: openssl version
I1102 13:46:31.011314 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1102 13:46:31.024540 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:31.029805 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 12:47 /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:31.029866 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1102 13:46:31.037318 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1102 13:46:31.048848 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/13270.pem && ln -fs /usr/share/ca-certificates/13270.pem /etc/ssl/certs/13270.pem"
I1102 13:46:31.063024 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/13270.pem
I1102 13:46:31.068655 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 12:54 /usr/share/ca-certificates/13270.pem
I1102 13:46:31.068711 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/13270.pem
I1102 13:46:31.076429 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/13270.pem /etc/ssl/certs/51391683.0"
I1102 13:46:31.090294 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/132702.pem && ln -fs /usr/share/ca-certificates/132702.pem /etc/ssl/certs/132702.pem"
I1102 13:46:31.106103 52806 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/132702.pem
I1102 13:46:31.111708 52806 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 12:54 /usr/share/ca-certificates/132702.pem
I1102 13:46:31.111774 52806 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/132702.pem
I1102 13:46:31.119459 52806 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/132702.pem /etc/ssl/certs/3ec20f2e.0"
I1102 13:46:31.133305 52806 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1102 13:46:31.138171 52806 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1102 13:46:31.138225 52806 kubeadm.go:401] StartCluster: {Name:embed-certs-705938 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21800/minikube-v1.37.0-1761658712-21800-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-705938 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.50.135 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1102 13:46:31.138356 52806 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1102 13:46:31.159777 52806 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1102 13:46:31.174591 52806 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1102 13:46:31.188149 52806 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1102 13:46:31.199556 52806 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1102 13:46:31.199580 52806 kubeadm.go:158] found existing configuration files:
I1102 13:46:31.199642 52806 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1102 13:46:31.210416 52806 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1102 13:46:31.210474 52806 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1102 13:46:31.221683 52806 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1102 13:46:31.232410 52806 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1102 13:46:31.232471 52806 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1102 13:46:31.244414 52806 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1102 13:46:31.258650 52806 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1102 13:46:31.258712 52806 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1102 13:46:31.276559 52806 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1102 13:46:31.291616 52806 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1102 13:46:31.291687 52806 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1102 13:46:31.304373 52806 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem"
I1102 13:46:31.449184 52806 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
==> Docker <==
Nov 02 13:45:46 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:46.757521674Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:45:46 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:46.757621103Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:45:46 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:46.764517503Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Nov 02 13:45:46 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:46.764566038Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:45:48 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:45:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ef8db52292f8faa4336702a5689c4aadadefb916cb189c72d5c2b1fefc930db3/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
Nov 02 13:45:48 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:45:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a0100eda4036460db98838fc48d07f6b6fd8700c11b989bf7487f9e41aa9668b/resolv.conf as [nameserver 192.168.122.1]"
Nov 02 13:45:59 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:59.663521843Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Nov 02 13:45:59 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:59.735231937Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Nov 02 13:45:59 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:45:59.735481063Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Nov 02 13:45:59 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:45:59Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
Nov 02 13:46:02 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:02.601657354Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:02 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:02.601789643Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:02 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:02.605681369Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Nov 02 13:46:02 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:02.605739290Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:03 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:03.687296590Z" level=info msg="ignoring event" container=b95dfdd056106acfa9cdc1cf1ba50a3780d9d74986af960c5d2bdc429f93db22 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Nov 02 13:46:37 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:46:37Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Nov 02 13:46:37 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:46:37Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-kvgcr_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"282b9f80c34567e9a258a7104324cc16ebe1953cc5052244da256a206b63b697\""
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.294547278Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.295850957Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.300503039Z" level=error msg="unexpected HTTP error handling" error="<nil>"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.300571465Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.418312647Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.502212145Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 dockerd[1147]: time="2025-11-02T13:46:38.502318651Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
Nov 02 13:46:38 default-k8s-diff-port-311562 cri-dockerd[1519]: time="2025-11-02T13:46:38Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
d3dc8b7583ba2 6e38f40d628db 3 seconds ago Running storage-provisioner 2 6936b7f5fd891 storage-provisioner
b1dc1a770a414 56cc512116c8f 53 seconds ago Running busybox 1 ef8db52292f8f busybox
89d0786d3647a 52546a367cc9e 53 seconds ago Running coredns 1 a0100eda40364 coredns-66bc5c9577-bnv4n
1019700155db8 kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93 55 seconds ago Running kubernetes-dashboard 0 399dad2b92a36 kubernetes-dashboard-855c9754f9-2f7cj
b95dfdd056106 6e38f40d628db About a minute ago Exited storage-provisioner 1 6936b7f5fd891 storage-provisioner
8965cdb1826d1 fc25172553d79 About a minute ago Running kube-proxy 1 5d16aead1b590 kube-proxy-5qv84
5ce993c62ff5a 5f1f5298c888d About a minute ago Running etcd 1 387fbeb352456 etcd-default-k8s-diff-port-311562
a38000da02ecd c80c8dbafe7dd About a minute ago Running kube-controller-manager 1 e876eb1a5a629 kube-controller-manager-default-k8s-diff-port-311562
396a831dc8f66 7dd6aaa1717ab About a minute ago Running kube-scheduler 1 40e0522270828 kube-scheduler-default-k8s-diff-port-311562
5d1c57648acf7 c3994bc696102 About a minute ago Running kube-apiserver 1 bdda83de52444 kube-apiserver-default-k8s-diff-port-311562
08b56ffa53d8c gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 2 minutes ago Exited busybox 0 c144cc94943b5 busybox
0bebcbbb6c23d 52546a367cc9e 2 minutes ago Exited coredns 0 dfd9d91f746b2 coredns-66bc5c9577-bnv4n
859f8a49dca1a fc25172553d79 2 minutes ago Exited kube-proxy 0 38a8172039765 kube-proxy-5qv84
326385a03f875 c3994bc696102 2 minutes ago Exited kube-apiserver 0 20a067500af02 kube-apiserver-default-k8s-diff-port-311562
3db15e0e0d1e0 7dd6aaa1717ab 2 minutes ago Exited kube-scheduler 0 9c5c22752c78c kube-scheduler-default-k8s-diff-port-311562
693e1d6c029a6 c80c8dbafe7dd 2 minutes ago Exited kube-controller-manager 0 4f4d927b9b110 kube-controller-manager-default-k8s-diff-port-311562
3d076807ce989 5f1f5298c888d 2 minutes ago Exited etcd 0 efae7bca22ce8 etcd-default-k8s-diff-port-311562
==> coredns [0bebcbbb6c23] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [89d0786d3647] <==
maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = ecad3ac8c72227dcf0d7a418ea5051ee155dd74d241a13c4787cc61906568517b5647c8519c78ef2c6b724422ee4b03d6cfb27e9a87140163726e83184faf782
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:35717 - 28729 "HINFO IN 7560457005306701952.7607275637384721463. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.111534194s
==> describe nodes <==
Name: default-k8s-diff-port-311562
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=default-k8s-diff-port-311562
kubernetes.io/os=linux
minikube.k8s.io/commit=170a9221ec214abbddb4c7cdac340516a92b239a
minikube.k8s.io/name=default-k8s-diff-port-311562
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_11_02T13_44_20_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 02 Nov 2025 13:44:16 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: default-k8s-diff-port-311562
AcquireTime: <unset>
RenewTime: Sun, 02 Nov 2025 13:46:37 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Sun, 02 Nov 2025 13:46:37 +0000 Sun, 02 Nov 2025 13:44:14 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sun, 02 Nov 2025 13:46:37 +0000 Sun, 02 Nov 2025 13:44:14 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sun, 02 Nov 2025 13:46:37 +0000 Sun, 02 Nov 2025 13:44:14 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sun, 02 Nov 2025 13:46:37 +0000 Sun, 02 Nov 2025 13:45:39 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.83.253
Hostname: default-k8s-diff-port-311562
Capacity:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3035908Ki
pods: 110
System Info:
Machine ID: 96534bedb89b40eeaf23f1340aeb91ac
System UUID: 96534bed-b89b-40ee-af23-f1340aeb91ac
Boot ID: ca22851d-b60f-48c3-875e-f0d6e84558c1
Kernel Version: 6.6.95
OS Image: Buildroot 2025.02
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.5.1
Kubelet Version: v1.34.1
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m3s
kube-system coredns-66bc5c9577-bnv4n 100m (5%) 0 (0%) 70Mi (2%) 170Mi (5%) 2m16s
kube-system etcd-default-k8s-diff-port-311562 100m (5%) 0 (0%) 100Mi (3%) 0 (0%) 2m21s
kube-system kube-apiserver-default-k8s-diff-port-311562 250m (12%) 0 (0%) 0 (0%) 0 (0%) 2m21s
kube-system kube-controller-manager-default-k8s-diff-port-311562 200m (10%) 0 (0%) 0 (0%) 0 (0%) 2m24s
kube-system kube-proxy-5qv84 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m17s
kube-system kube-scheduler-default-k8s-diff-port-311562 100m (5%) 0 (0%) 0 (0%) 0 (0%) 2m21s
kube-system metrics-server-746fcd58dc-tcttv 100m (5%) 0 (0%) 200Mi (6%) 0 (0%) 112s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m13s
kubernetes-dashboard dashboard-metrics-scraper-6ffb444bf9-tk9xk 0 (0%) 0 (0%) 0 (0%) 0 (0%) 65s
kubernetes-dashboard kubernetes-dashboard-855c9754f9-2f7cj 0 (0%) 0 (0%) 0 (0%) 0 (0%) 65s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 0 (0%)
memory 370Mi (12%) 170Mi (5%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 2m14s kube-proxy
Normal Starting 67s kube-proxy
Normal NodeHasNoDiskPressure 2m21s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasNoDiskPressure
Normal NodeAllocatableEnforced 2m21s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 2m21s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientMemory
Normal NodeHasSufficientPID 2m21s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientPID
Normal Starting 2m21s kubelet Starting kubelet.
Normal NodeReady 2m20s kubelet Node default-k8s-diff-port-311562 status is now: NodeReady
Normal RegisteredNode 2m17s node-controller Node default-k8s-diff-port-311562 event: Registered Node default-k8s-diff-port-311562 in Controller
Normal NodeHasNoDiskPressure 74s (x8 over 74s) kubelet Node default-k8s-diff-port-311562 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientMemory 74s (x8 over 74s) kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientMemory
Normal Starting 74s kubelet Starting kubelet.
Normal NodeHasSufficientPID 74s (x7 over 74s) kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 74s kubelet Updated Node Allocatable limit across pods
Warning Rebooted 69s kubelet Node default-k8s-diff-port-311562 has been rebooted, boot id: ca22851d-b60f-48c3-875e-f0d6e84558c1
Normal RegisteredNode 66s node-controller Node default-k8s-diff-port-311562 event: Registered Node default-k8s-diff-port-311562 in Controller
Normal Starting 5s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 4s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 4s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 4s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 4s kubelet Node default-k8s-diff-port-311562 status is now: NodeHasSufficientPID
==> dmesg <==
[Nov 2 13:45] Booted with the nomodeset parameter. Only the system framebuffer will be available
[ +0.000007] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
[ +0.000053] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +0.006822] (rpcbind)[118]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
[ +0.984941] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
[ +0.000025] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
[ +0.000002] NFSD: Unable to initialize client recovery tracking! (-2)
[ +0.129738] kauditd_printk_skb: 161 callbacks suppressed
[ +0.100494] kauditd_printk_skb: 289 callbacks suppressed
[ +5.715230] kauditd_printk_skb: 165 callbacks suppressed
[ +4.746976] kauditd_printk_skb: 134 callbacks suppressed
[ +6.790105] kauditd_printk_skb: 141 callbacks suppressed
[Nov 2 13:46] kauditd_printk_skb: 132 callbacks suppressed
[ +0.215790] kauditd_printk_skb: 35 callbacks suppressed
==> etcd [3d076807ce98] <==
	{"level":"warn","ts":"2025-11-02T13:44:28.710383Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"132.249203ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/storageclasses/standard\" limit:1 ","response":"range_response_count:1 size:992"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:28.710431Z","caller":"traceutil/trace.go:172","msg":"trace[77773902] range","detail":"{range_begin:/registry/storageclasses/standard; range_end:; response_count:1; response_revision:362; }","duration":"132.31132ms","start":"2025-11-02T13:44:28.578110Z","end":"2025-11-02T13:44:28.710421Z","steps":["trace[77773902] 'agreement among raft nodes before linearized reading'  (duration: 113.91205ms)","trace[77773902] 'range keys from in-memory index tree'  (duration: 18.249766ms)"],"step_count":2}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:28.710599Z","caller":"traceutil/trace.go:172","msg":"trace[35326147] transaction","detail":"{read_only:false; response_revision:364; number_of_response:1; }","duration":"146.846351ms","start":"2025-11-02T13:44:28.563740Z","end":"2025-11-02T13:44:28.710586Z","steps":["trace[35326147] 'process raft request'  (duration: 146.798823ms)"],"step_count":1}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:28.710750Z","caller":"traceutil/trace.go:172","msg":"trace[1099436701] transaction","detail":"{read_only:false; response_revision:363; number_of_response:1; }","duration":"150.90502ms","start":"2025-11-02T13:44:28.559836Z","end":"2025-11-02T13:44:28.710741Z","steps":["trace[1099436701] 'process raft request'  (duration: 132.240173ms)","trace[1099436701] 'compare'  (duration: 18.185172ms)"],"step_count":2}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:44:28.711806Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"120.468313ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods/kube-system/kube-scheduler-default-k8s-diff-port-311562\" limit:1 ","response":"range_response_count:1 size:5058"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:28.713796Z","caller":"traceutil/trace.go:172","msg":"trace[1836530902] range","detail":"{range_begin:/registry/pods/kube-system/kube-scheduler-default-k8s-diff-port-311562; range_end:; response_count:1; response_revision:364; }","duration":"122.467813ms","start":"2025-11-02T13:44:28.591320Z","end":"2025-11-02T13:44:28.713788Z","steps":["trace[1836530902] 'agreement among raft nodes before linearized reading'  (duration: 120.401561ms)"],"step_count":1}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:28.713620Z","caller":"traceutil/trace.go:172","msg":"trace[996614801] transaction","detail":"{read_only:false; response_revision:365; number_of_response:1; }","duration":"119.843122ms","start":"2025-11-02T13:44:28.593765Z","end":"2025-11-02T13:44:28.713609Z","steps":["trace[996614801] 'process raft request'  (duration: 118.595346ms)"],"step_count":1}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:50.852268Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:50.852399Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"default-k8s-diff-port-311562","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.253:2380"],"advertise-client-urls":["https://192.168.83.253:2379"]}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:50.852551Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:57.856103Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:57.862277Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.863439Z","caller":"etcdserver/server.go:1281","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"499efbce2300e1e","current-leader-member-id":"499efbce2300e1e"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:44:57.863819Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:44:57.863947Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:57.864081Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:44:57.864504Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.83.253:2379: use of closed network connection"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:44:57.864685Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.83.253:2379: use of closed network connection"}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:57.864841Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.253:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.864930Z","caller":"etcdserver/server.go:2342","msg":"server has stopped; stopping storage version's monitor"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.865110Z","caller":"etcdserver/server.go:2319","msg":"server has stopped; stopping cluster version's monitor"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.869667Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.83.253:2380"}
                                                
                                                	{"level":"error","ts":"2025-11-02T13:44:57.869751Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.83.253:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.869819Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.83.253:2380"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:44:57.869925Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"default-k8s-diff-port-311562","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.83.253:2380"],"advertise-client-urls":["https://192.168.83.253:2379"]}
                                                
                                                ==> etcd [5ce993c62ff5] <==
	{"level":"warn","ts":"2025-11-02T13:45:31.330145Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:38946","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.343996Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:38960","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.361984Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:38976","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.375631Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:38994","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.390071Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39026","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.405435Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39042","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.419030Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39072","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.439543Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39092","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.447139Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39122","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.455878Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39130","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.466974Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39158","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.484514Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39168","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.498690Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39186","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.528507Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39198","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.562814Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39216","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.571971Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39232","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.583731Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39250","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.593158Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39254","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.610839Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39284","server-name":"","error":"EOF"}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:31.670266Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:39302","server-name":"","error":"EOF"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:45:46.220299Z","caller":"traceutil/trace.go:172","msg":"trace[64781144] linearizableReadLoop","detail":"{readStateIndex:691; appliedIndex:691; }","duration":"376.246241ms","start":"2025-11-02T13:45:45.844031Z","end":"2025-11-02T13:45:46.220277Z","steps":["trace[64781144] 'read index received'  (duration: 376.241187ms)","trace[64781144] 'applied index is now lower than readState.Index'  (duration: 4.383µs)"],"step_count":2}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:46.220560Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"376.526623ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:45:46.220643Z","caller":"traceutil/trace.go:172","msg":"trace[815114890] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:652; }","duration":"376.698548ms","start":"2025-11-02T13:45:45.843937Z","end":"2025-11-02T13:45:46.220635Z","steps":["trace[815114890] 'agreement among raft nodes before linearized reading'  (duration: 376.504996ms)"],"step_count":1}
                                                
                                                	{"level":"warn","ts":"2025-11-02T13:45:46.222967Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"194.912257ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods/kube-system/coredns-66bc5c9577-bnv4n\" limit:1 ","response":"range_response_count:1 size:5703"}
                                                
                                                	{"level":"info","ts":"2025-11-02T13:45:46.223153Z","caller":"traceutil/trace.go:172","msg":"trace[59260110] range","detail":"{range_begin:/registry/pods/kube-system/coredns-66bc5c9577-bnv4n; range_end:; response_count:1; response_revision:652; }","duration":"195.053917ms","start":"2025-11-02T13:45:46.028034Z","end":"2025-11-02T13:45:46.223088Z","steps":["trace[59260110] 'agreement among raft nodes before linearized reading'  (duration: 194.846556ms)"],"step_count":1}
                                                
                                                ==> kernel <==
13:46:41 up 1 min, 0 users, load average: 1.71, 0.58, 0.21
Linux default-k8s-diff-port-311562 6.6.95 #1 SMP PREEMPT_DYNAMIC Tue Oct 28 16:58:05 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2025.02"
==> kube-apiserver [326385a03f87] <==
W1102 13:45:00.210906       1 logging.go:55] [core] [Channel #139 SubChannel #141]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.243433       1 logging.go:55] [core] [Channel #1 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.262428       1 logging.go:55] [core] [Channel #179 SubChannel #181]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.318835       1 logging.go:55] [core] [Channel #4 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.419905       1 logging.go:55] [core] [Channel #115 SubChannel #117]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.423645       1 logging.go:55] [core] [Channel #13 SubChannel #15]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.432493       1 logging.go:55] [core] [Channel #251 SubChannel #253]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.443372       1 logging.go:55] [core] [Channel #247 SubChannel #249]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.498105       1 logging.go:55] [core] [Channel #183 SubChannel #185]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.538015       1 logging.go:55] [core] [Channel #171 SubChannel #173]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.543596       1 logging.go:55] [core] [Channel #27 SubChannel #29]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.556876       1 logging.go:55] [core] [Channel #175 SubChannel #177]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.604554       1 logging.go:55] [core] [Channel #111 SubChannel #113]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.621116       1 logging.go:55] [core] [Channel #63 SubChannel #65]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.632390       1 logging.go:55] [core] [Channel #187 SubChannel #189]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.632397       1 logging.go:55] [core] [Channel #239 SubChannel #241]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.664868       1 logging.go:55] [core] [Channel #59 SubChannel #61]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.713717       1 logging.go:55] [core] [Channel #39 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.731482       1 logging.go:55] [core] [Channel #235 SubChannel #237]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.751716       1 logging.go:55] [core] [Channel #227 SubChannel #229]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.776547       1 logging.go:55] [core] [Channel #131 SubChannel #133]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.830551       1 logging.go:55] [core] [Channel #67 SubChannel #69]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.861394       1 logging.go:55] [core] [Channel #87 SubChannel #89]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.893564       1 logging.go:55] [core] [Channel #195 SubChannel #197]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1102 13:45:00.920873       1 logging.go:55] [core] [Channel #91 SubChannel #93]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
                                                ==> kube-apiserver [5d1c57648acf] <==
W1102 13:45:33.374282 1 handler_proxy.go:99] no RequestInfo found in the context
E1102 13:45:33.374620 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1102 13:45:33.374646 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
I1102 13:45:34.384503 1 controller.go:667] quota admission added evaluator for: deployments.apps
I1102 13:45:34.454503 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I1102 13:45:34.526510 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I1102 13:45:34.539619 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I1102 13:45:35.849585 1 controller.go:667] quota admission added evaluator for: endpoints
I1102 13:45:36.004255 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I1102 13:45:36.196794 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I1102 13:45:36.547782 1 controller.go:667] quota admission added evaluator for: namespaces
I1102 13:45:36.984810       1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.102.109.123"}
I1102 13:45:37.005946       1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.98.142.247"}
                                                W1102 13:46:36.461564 1 handler_proxy.go:99] no RequestInfo found in the context
E1102 13:46:36.461774 1 controller.go:102] "Unhandled Error" err=<
loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1102 13:46:36.461799 1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
W1102 13:46:36.490050 1 handler_proxy.go:99] no RequestInfo found in the context
E1102 13:46:36.491243 1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
I1102 13:46:36.491555 1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
==> kube-controller-manager [693e1d6c029a] <==
I1102 13:44:24.032230 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I1102 13:44:24.033458 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1102 13:44:24.034449 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I1102 13:44:24.043978 1 shared_informer.go:356] "Caches are synced" controller="endpoint"
I1102 13:44:24.052411 1 shared_informer.go:356] "Caches are synced" controller="namespace"
I1102 13:44:24.065329 1 shared_informer.go:356] "Caches are synced" controller="job"
I1102 13:44:24.071035 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1102 13:44:24.071083 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1102 13:44:24.071089 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1102 13:44:24.072999 1 shared_informer.go:356] "Caches are synced" controller="persistent volume"
I1102 13:44:24.073369 1 shared_informer.go:356] "Caches are synced" controller="service account"
I1102 13:44:24.073659 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I1102 13:44:24.073772 1 shared_informer.go:356] "Caches are synced" controller="cronjob"
I1102 13:44:24.074694 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
I1102 13:44:24.082468 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1102 13:44:24.082876 1 shared_informer.go:356] "Caches are synced" controller="node"
I1102 13:44:24.085205 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I1102 13:44:24.085454 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I1102 13:44:24.085624 1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
I1102 13:44:24.085762 1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
I1102 13:44:24.089290 1 shared_informer.go:356] "Caches are synced" controller="taint"
I1102 13:44:24.089578 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I1102 13:44:24.089928 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="default-k8s-diff-port-311562"
I1102 13:44:24.092875 1 node_lifecycle_controller.go:1067] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal"
I1102 13:44:24.115542 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="default-k8s-diff-port-311562" podCIDRs=["10.244.0.0/24"]
==> kube-controller-manager [a38000da02ec] <==
I1102 13:45:35.871183 1 shared_informer.go:356] "Caches are synced" controller="HPA"
I1102 13:45:35.881200 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I1102 13:45:35.884409 1 shared_informer.go:356] "Caches are synced" controller="service-cidr-controller"
I1102 13:45:35.884481 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I1102 13:45:35.887846 1 shared_informer.go:356] "Caches are synced" controller="taint"
I1102 13:45:35.887856 1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
I1102 13:45:35.887966 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I1102 13:45:35.887981 1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I1102 13:45:35.888061 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="default-k8s-diff-port-311562"
I1102 13:45:35.888110 1 node_lifecycle_controller.go:1025] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller"
I1102 13:45:35.888451 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I1102 13:45:35.888461 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I1102 13:45:35.888467 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I1102 13:45:35.894403 1 shared_informer.go:356] "Caches are synced" controller="job"
I1102 13:45:35.907417 1 shared_informer.go:356] "Caches are synced" controller="disruption"
I1102 13:45:35.922833 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
E1102 13:45:36.762730 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1102 13:45:36.799547 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1102 13:45:36.816443 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1102 13:45:36.816681 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1102 13:45:36.832832 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
E1102 13:45:36.837400 1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
I1102 13:45:40.889022 1 node_lifecycle_controller.go:1044] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
E1102 13:46:36.518593 1 resource_quota_controller.go:446] "Unhandled Error" err="unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: stale GroupVersion discovery: metrics.k8s.io/v1beta1" logger="UnhandledError"
I1102 13:46:36.537792 1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
==> kube-proxy [859f8a49dca1] <==
I1102 13:44:26.731509 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1102 13:44:26.834249 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1102 13:44:26.834385 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.253"]
E1102 13:44:26.837097 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1102 13:44:27.009331 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1102 13:44:27.009396 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1102 13:44:27.009428 1 server_linux.go:132] "Using iptables Proxier"
I1102 13:44:27.053522 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1102 13:44:27.054108 1 server.go:527] "Version info" version="v1.34.1"
I1102 13:44:27.054173 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1102 13:44:27.079784 1 config.go:200] "Starting service config controller"
I1102 13:44:27.079817 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1102 13:44:27.081693 1 config.go:106] "Starting endpoint slice config controller"
I1102 13:44:27.082792 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1102 13:44:27.082853 1 config.go:403] "Starting serviceCIDR config controller"
I1102 13:44:27.082878 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1102 13:44:27.085725 1 config.go:309] "Starting node config controller"
I1102 13:44:27.085738 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1102 13:44:27.181791 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1102 13:44:27.182915 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1102 13:44:27.183118 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
I1102 13:44:27.186936 1 shared_informer.go:356] "Caches are synced" controller="node config"
==> kube-proxy [8965cdb1826d] <==
I1102 13:45:33.789139 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I1102 13:45:33.890490 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I1102 13:45:33.890552 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.83.253"]
E1102 13:45:33.890670 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1102 13:45:33.941906 1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Perhaps ip6tables or your kernel needs to be upgraded.
>
I1102 13:45:33.941978 1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I1102 13:45:33.942013 1 server_linux.go:132] "Using iptables Proxier"
I1102 13:45:33.952403 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1102 13:45:33.953896 1 server.go:527] "Version info" version="v1.34.1"
I1102 13:45:33.953939 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1102 13:45:33.966192 1 config.go:200] "Starting service config controller"
I1102 13:45:33.966301 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I1102 13:45:33.966403 1 config.go:106] "Starting endpoint slice config controller"
I1102 13:45:33.966477 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I1102 13:45:33.966505 1 config.go:403] "Starting serviceCIDR config controller"
I1102 13:45:33.966585 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I1102 13:45:33.969664 1 config.go:309] "Starting node config controller"
I1102 13:45:33.969703 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I1102 13:45:33.969711 1 shared_informer.go:356] "Caches are synced" controller="node config"
I1102 13:45:34.066523 1 shared_informer.go:356] "Caches are synced" controller="service config"
I1102 13:45:34.066534 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I1102 13:45:34.066730 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
==> kube-scheduler [396a831dc8f6] <==
I1102 13:45:30.213095 1 serving.go:386] Generated self-signed cert in-memory
W1102 13:45:32.329054 1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W1102 13:45:32.329109 1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W1102 13:45:32.329124 1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
W1102 13:45:32.329130 1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I1102 13:45:32.441927 1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.1"
I1102 13:45:32.442042 1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1102 13:45:32.445945 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1102 13:45:32.446004 1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1102 13:45:32.446453 1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
I1102 13:45:32.446673 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I1102 13:45:32.547060 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
==> kube-scheduler [3db15e0e0d1e] <==
E1102 13:44:16.975955 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E1102 13:44:16.976013 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E1102 13:44:16.976607 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E1102 13:44:16.978344 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1102 13:44:17.808669 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E1102 13:44:17.882355 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
E1102 13:44:17.895093 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass"
E1102 13:44:17.951002 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E1102 13:44:17.977073 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E1102 13:44:17.986009 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E1102 13:44:18.011730 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
E1102 13:44:18.046027 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E1102 13:44:18.060213 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E1102 13:44:18.120966 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E1102 13:44:18.133736 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E1102 13:44:18.239035 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E1102 13:44:18.280548 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
E1102 13:44:18.416388 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
I1102 13:44:21.239623 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I1102 13:44:50.998036 1 secure_serving.go:259] Stopped listening on 127.0.0.1:10259
I1102 13:44:50.998908 1 server.go:263] "[graceful-termination] secure server has stopped listening"
I1102 13:44:51.001979 1 server.go:265] "[graceful-termination] secure server is exiting"
I1102 13:44:51.002470 1 configmap_cafile_content.go:226] "Shutting down controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
E1102 13:44:51.002473 1 run.go:72] "command failed" err="finished without leader elect"
I1102 13:44:50.998192 1 tlsconfig.go:258] "Shutting down DynamicServingCertificateController"
==> kubelet <==
Nov 02 13:46:37 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:37.536624 4148 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c5c22752c78c30bcc6d738f36262c2e4e32844883048aaf960fcb123556e4dc"
Nov 02 13:46:37 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:37.536946 4148 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-default-k8s-diff-port-311562"
Nov 02 13:46:37 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:37.551278 4148 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-default-k8s-diff-port-311562\" already exists" pod="kube-system/kube-scheduler-default-k8s-diff-port-311562"
Nov 02 13:46:37 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:37.966742 4148 apiserver.go:52] "Watching apiserver"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.017306 4148 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.051537 4148 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/11842593-1fe8-476c-a692-ecdecf44fafa-xtables-lock\") pod \"kube-proxy-5qv84\" (UID: \"11842593-1fe8-476c-a692-ecdecf44fafa\") " pod="kube-system/kube-proxy-5qv84"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.051606 4148 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/11842593-1fe8-476c-a692-ecdecf44fafa-lib-modules\") pod \"kube-proxy-5qv84\" (UID: \"11842593-1fe8-476c-a692-ecdecf44fafa\") " pod="kube-system/kube-proxy-5qv84"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.051714 4148 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/ce313798-c158-4174-aec9-8d1e48caceea-tmp\") pod \"storage-provisioner\" (UID: \"ce313798-c158-4174-aec9-8d1e48caceea\") " pod="kube-system/storage-provisioner"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.278280 4148 scope.go:117] "RemoveContainer" containerID="b95dfdd056106acfa9cdc1cf1ba50a3780d9d74986af960c5d2bdc429f93db22"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.301712 4148 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.302320 4148 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.302814 4148 kuberuntime_manager.go:1449] "Unhandled Error" err="container metrics-server start failed in pod metrics-server-746fcd58dc-tcttv_kube-system(e9fc9174-d97e-4486-a4da-a405ebd4a7f3): ErrImagePull: Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" logger="UnhandledError"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.302892 4148 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"metrics-server\" with ErrImagePull: \"Error response from daemon: Get \\\"https://fake.domain/v2/\\\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host\"" pod="kube-system/metrics-server-746fcd58dc-tcttv" podUID="e9fc9174-d97e-4486-a4da-a405ebd4a7f3"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.511095 4148 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.511160 4148 kuberuntime_image.go:43] "Failed to pull image" err="Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.511235 4148 kuberuntime_manager.go:1449] "Unhandled Error" err="container dashboard-metrics-scraper start failed in pod dashboard-metrics-scraper-6ffb444bf9-tk9xk_kubernetes-dashboard(dfc3630d-85e5-4ff8-8cd0-d0de23d3753a): ErrImagePull: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" logger="UnhandledError"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.511277 4148 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ErrImagePull: \"Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9-tk9xk" podUID="dfc3630d-85e5-4ff8-8cd0-d0de23d3753a"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.599294 4148 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.599738 4148 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.600064 4148 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: I1102 13:46:38.600308 4148 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.640609 4148 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-default-k8s-diff-port-311562\" already exists" pod="kube-system/kube-controller-manager-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.645224 4148 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-default-k8s-diff-port-311562\" already exists" pod="kube-system/kube-scheduler-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.650457 4148 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-default-k8s-diff-port-311562\" already exists" pod="kube-system/etcd-default-k8s-diff-port-311562"
Nov 02 13:46:38 default-k8s-diff-port-311562 kubelet[4148]: E1102 13:46:38.660589 4148 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-default-k8s-diff-port-311562\" already exists" pod="kube-system/kube-apiserver-default-k8s-diff-port-311562"
==> kubernetes-dashboard [1019700155db] <==
2025/11/02 13:45:46 Using namespace: kubernetes-dashboard
2025/11/02 13:45:46 Using in-cluster config to connect to apiserver
2025/11/02 13:45:46 Using secret token for csrf signing
2025/11/02 13:45:46 Initializing csrf token from kubernetes-dashboard-csrf secret
2025/11/02 13:45:46 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
2025/11/02 13:45:46 Successful initial request to the apiserver, version: v1.34.1
2025/11/02 13:45:46 Generating JWE encryption key
2025/11/02 13:45:46 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
2025/11/02 13:45:46 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
2025/11/02 13:45:47 Initializing JWE encryption key from synchronized object
2025/11/02 13:45:47 Creating in-cluster Sidecar client
2025/11/02 13:45:47 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2025/11/02 13:45:47 Serving insecurely on HTTP port: 9090
2025/11/02 13:46:36 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
2025/11/02 13:45:46 Starting overwatch
==> storage-provisioner [b95dfdd05610] <==
I1102 13:45:33.650270 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F1102 13:46:03.661740 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
==> storage-provisioner [d3dc8b7583ba] <==
I1102 13:46:38.584324 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1102 13:46:38.609941 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1102 13:46:38.610912 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
W1102 13:46:38.628854 1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
-- /stdout --
helpers_test.go:262: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-311562 -n default-k8s-diff-port-311562
helpers_test.go:269: (dbg) Run:  kubectl --context default-k8s-diff-port-311562 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: metrics-server-746fcd58dc-tcttv dashboard-metrics-scraper-6ffb444bf9-tk9xk
helpers_test.go:282: ======> post-mortem[TestStartStop/group/default-k8s-diff-port/serial/Pause]: describe non-running pods <======
helpers_test.go:285: (dbg) Run: kubectl --context default-k8s-diff-port-311562 describe pod metrics-server-746fcd58dc-tcttv dashboard-metrics-scraper-6ffb444bf9-tk9xk
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context default-k8s-diff-port-311562 describe pod metrics-server-746fcd58dc-tcttv dashboard-metrics-scraper-6ffb444bf9-tk9xk: exit status 1 (70.034956ms)
** stderr **
Error from server (NotFound): pods "metrics-server-746fcd58dc-tcttv" not found
Error from server (NotFound): pods "dashboard-metrics-scraper-6ffb444bf9-tk9xk" not found
** /stderr **
helpers_test.go:287: kubectl --context default-k8s-diff-port-311562 describe pod metrics-server-746fcd58dc-tcttv dashboard-metrics-scraper-6ffb444bf9-tk9xk: exit status 1
--- FAIL: TestStartStop/group/default-k8s-diff-port/serial/Pause (39.49s)